python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Free Electrons
* Copyright (C) 2017 NextThing Co
*
* Author: Boris Brezillon <[email protected]>
*/
#include <linux/slab.h>
#include "internals.h"
/*
* Special Micron status bit 3 indicates that the block has been
* corrected by on-die ECC and should be rewritten.
*/
#define NAND_ECC_STATUS_WRITE_RECOMMENDED BIT(3)
/*
* On chips with 8-bit ECC, an additional bit can be used to distinguish
* cases where errors were corrected without needing a rewrite
*
* Bit 4 Bit 3 Bit 0 Description
* ----- ----- ----- -----------
* 0 0 0 No Errors
* 0 0 1 Multiple uncorrected errors
* 0 1 0 4 - 6 errors corrected, recommend rewrite
* 0 1 1 Reserved
* 1 0 0 1 - 3 errors corrected
* 1 0 1 Reserved
* 1 1 0 7 - 8 errors corrected, recommend rewrite
*/
#define NAND_ECC_STATUS_MASK (BIT(4) | BIT(3) | BIT(0))
#define NAND_ECC_STATUS_UNCORRECTABLE BIT(0)
#define NAND_ECC_STATUS_4_6_CORRECTED BIT(3)
#define NAND_ECC_STATUS_1_3_CORRECTED BIT(4)
#define NAND_ECC_STATUS_7_8_CORRECTED (BIT(4) | BIT(3))
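/*
 * Illustrative decode of the 8-bit ECC status bits above (derived from the
 * table, not from a datasheet): a status of (BIT(4) | BIT(3)) means 7-8
 * errors were corrected and a rewrite is recommended, BIT(4) alone means
 * 1-3 errors were corrected, and BIT(0) alone flags multiple uncorrected
 * errors.
 */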
struct nand_onfi_vendor_micron {
u8 two_plane_read;
u8 read_cache;
u8 read_unique_id;
u8 dq_imped;
u8 dq_imped_num_settings;
u8 dq_imped_feat_addr;
u8 rb_pulldown_strength;
u8 rb_pulldown_strength_feat_addr;
u8 rb_pulldown_strength_num_settings;
u8 otp_mode;
u8 otp_page_start;
u8 otp_data_prot_addr;
u8 otp_num_pages;
u8 otp_feat_addr;
u8 read_retry_options;
u8 reserved[72];
u8 param_revision;
} __packed;
struct micron_on_die_ecc {
bool forced;
bool enabled;
void *rawbuf;
};
struct micron_nand {
struct micron_on_die_ecc ecc;
};
static int micron_nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
}
/*
* Configure chip properties from Micron vendor-specific ONFI table
*/
static int micron_nand_onfi_init(struct nand_chip *chip)
{
struct nand_parameters *p = &chip->parameters;
if (p->onfi) {
struct nand_onfi_vendor_micron *micron = (void *)p->onfi->vendor;
chip->read_retries = micron->read_retry_options;
chip->ops.setup_read_retry = micron_nand_setup_read_retry;
}
if (p->supports_set_get_features) {
set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->set_feature_list);
set_bit(ONFI_FEATURE_ON_DIE_ECC, p->set_feature_list);
set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->get_feature_list);
set_bit(ONFI_FEATURE_ON_DIE_ECC, p->get_feature_list);
}
return 0;
}
static int micron_nand_on_die_4_ooblayout_ecc(struct mtd_info *mtd,
int section,
struct mtd_oob_region *oobregion)
{
if (section >= 4)
return -ERANGE;
oobregion->offset = (section * 16) + 8;
oobregion->length = 8;
return 0;
}
static int micron_nand_on_die_4_ooblayout_free(struct mtd_info *mtd,
int section,
struct mtd_oob_region *oobregion)
{
if (section >= 4)
return -ERANGE;
oobregion->offset = (section * 16) + 2;
oobregion->length = 6;
return 0;
}
static const struct mtd_ooblayout_ops micron_nand_on_die_4_ooblayout_ops = {
.ecc = micron_nand_on_die_4_ooblayout_ecc,
.free = micron_nand_on_die_4_ooblayout_free,
};
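/*
 * For illustration only: the two callbacks above describe the 4-bit on-die
 * ECC OOB layout as four 16-byte chunks, each with bytes 2-7 free for the
 * user, bytes 8-15 holding ECC data, and bytes 0-1 left out of the layout
 * (conventionally the bad-block marker area).
 */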
static int micron_nand_on_die_8_ooblayout_ecc(struct mtd_info *mtd,
int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (section)
return -ERANGE;
oobregion->offset = mtd->oobsize - chip->ecc.total;
oobregion->length = chip->ecc.total;
return 0;
}
static int micron_nand_on_die_8_ooblayout_free(struct mtd_info *mtd,
int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (section)
return -ERANGE;
oobregion->offset = 2;
oobregion->length = mtd->oobsize - chip->ecc.total - 2;
return 0;
}
static const struct mtd_ooblayout_ops micron_nand_on_die_8_ooblayout_ops = {
.ecc = micron_nand_on_die_8_ooblayout_ecc,
.free = micron_nand_on_die_8_ooblayout_free,
};
static int micron_nand_on_die_ecc_setup(struct nand_chip *chip, bool enable)
{
struct micron_nand *micron = nand_get_manufacturer_data(chip);
u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { 0, };
int ret;
if (micron->ecc.forced)
return 0;
if (micron->ecc.enabled == enable)
return 0;
if (enable)
feature[0] |= ONFI_FEATURE_ON_DIE_ECC_EN;
ret = nand_set_features(chip, ONFI_FEATURE_ON_DIE_ECC, feature);
if (!ret)
micron->ecc.enabled = enable;
return ret;
}
static int micron_nand_on_die_ecc_status_4(struct nand_chip *chip, u8 status,
void *buf, int page,
int oob_required)
{
struct micron_nand *micron = nand_get_manufacturer_data(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int step, max_bitflips = 0;
bool use_datain = false;
int ret;
if (!(status & NAND_ECC_STATUS_WRITE_RECOMMENDED)) {
if (status & NAND_STATUS_FAIL)
mtd->ecc_stats.failed++;
return 0;
}
/*
* The internal ECC doesn't tell us the number of bitflips that have
* been corrected, but tells us if it recommends to rewrite the block.
* If it's the case, we need to read the page in raw mode and compare
* its content to the corrected version to extract the actual number of
* bitflips.
* But before we do that, we must make sure we have all OOB bytes read
* in non-raw mode, even if the user did not request those bytes.
*/
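/*
 * Worked example with illustrative values: if the corrected byte reads
 * 0xA5 and the raw byte reads 0xA1, then corrbuf[i] ^ rawbuf[i] = 0x04 and
 * hweight8() counts a single bitflip for that byte.
 */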
if (!oob_required) {
/*
* We first check which operation is supported by the controller
* before running it. This trick makes it possible to support
* all controllers, even the most constrained ones, with almost
* no performance hit.
*
* TODO: could be enhanced to avoid repeating the same check
* over and over in the fast path.
*/
if (!nand_has_exec_op(chip) ||
!nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
true))
use_datain = true;
if (use_datain)
ret = nand_read_data_op(chip, chip->oob_poi,
mtd->oobsize, false, false);
else
ret = nand_change_read_column_op(chip, mtd->writesize,
chip->oob_poi,
mtd->oobsize, false);
if (ret)
return ret;
}
micron_nand_on_die_ecc_setup(chip, false);
ret = nand_read_page_op(chip, page, 0, micron->ecc.rawbuf,
mtd->writesize + mtd->oobsize);
if (ret)
return ret;
for (step = 0; step < chip->ecc.steps; step++) {
unsigned int offs, i, nbitflips = 0;
u8 *rawbuf, *corrbuf;
offs = step * chip->ecc.size;
rawbuf = micron->ecc.rawbuf + offs;
corrbuf = buf + offs;
for (i = 0; i < chip->ecc.size; i++)
nbitflips += hweight8(corrbuf[i] ^ rawbuf[i]);
offs = (step * 16) + 4;
rawbuf = micron->ecc.rawbuf + mtd->writesize + offs;
corrbuf = chip->oob_poi + offs;
for (i = 0; i < chip->ecc.bytes + 4; i++)
nbitflips += hweight8(corrbuf[i] ^ rawbuf[i]);
if (WARN_ON(nbitflips > chip->ecc.strength))
return -EINVAL;
max_bitflips = max(nbitflips, max_bitflips);
mtd->ecc_stats.corrected += nbitflips;
}
return max_bitflips;
}
static int micron_nand_on_die_ecc_status_8(struct nand_chip *chip, u8 status)
{
struct mtd_info *mtd = nand_to_mtd(chip);
/*
* With 8/512 we have more information but still don't know precisely
* how many bit-flips were seen.
*/
switch (status & NAND_ECC_STATUS_MASK) {
case NAND_ECC_STATUS_UNCORRECTABLE:
mtd->ecc_stats.failed++;
return 0;
case NAND_ECC_STATUS_1_3_CORRECTED:
mtd->ecc_stats.corrected += 3;
return 3;
case NAND_ECC_STATUS_4_6_CORRECTED:
mtd->ecc_stats.corrected += 6;
/* rewrite recommended */
return 6;
case NAND_ECC_STATUS_7_8_CORRECTED:
mtd->ecc_stats.corrected += 8;
/* rewrite recommended */
return 8;
default:
return 0;
}
}
static int
micron_nand_read_page_on_die_ecc(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
bool use_datain = false;
u8 status;
int ret, max_bitflips = 0;
ret = micron_nand_on_die_ecc_setup(chip, true);
if (ret)
return ret;
ret = nand_read_page_op(chip, page, 0, NULL, 0);
if (ret)
goto out;
ret = nand_status_op(chip, &status);
if (ret)
goto out;
/*
* We first check which operation is supported by the controller before
* running it. This trick makes it possible to support all controllers,
* even the most constrained ones, with almost no performance hit.
*
* TODO: could be enhanced to avoid repeating the same check over and
* over in the fast path.
*/
if (!nand_has_exec_op(chip) ||
!nand_read_data_op(chip, buf, mtd->writesize, false, true))
use_datain = true;
if (use_datain) {
ret = nand_exit_status_op(chip);
if (ret)
goto out;
ret = nand_read_data_op(chip, buf, mtd->writesize, false,
false);
if (!ret && oob_required)
ret = nand_read_data_op(chip, chip->oob_poi,
mtd->oobsize, false, false);
} else {
ret = nand_change_read_column_op(chip, 0, buf, mtd->writesize,
false);
if (!ret && oob_required)
ret = nand_change_read_column_op(chip, mtd->writesize,
chip->oob_poi,
mtd->oobsize, false);
}
if (chip->ecc.strength == 4)
max_bitflips = micron_nand_on_die_ecc_status_4(chip, status,
buf, page,
oob_required);
else
max_bitflips = micron_nand_on_die_ecc_status_8(chip, status);
out:
micron_nand_on_die_ecc_setup(chip, false);
return ret ? ret : max_bitflips;
}
static int
micron_nand_write_page_on_die_ecc(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
int ret;
ret = micron_nand_on_die_ecc_setup(chip, true);
if (ret)
return ret;
ret = nand_write_page_raw(chip, buf, oob_required, page);
micron_nand_on_die_ecc_setup(chip, false);
return ret;
}
enum {
/* The NAND flash doesn't support on-die ECC */
MICRON_ON_DIE_UNSUPPORTED,
/*
* The NAND flash supports on-die ECC and it can be
* enabled/disabled by a set features command.
*/
MICRON_ON_DIE_SUPPORTED,
/*
* The NAND flash supports on-die ECC, and it cannot be
* disabled.
*/
MICRON_ON_DIE_MANDATORY,
};
#define MICRON_ID_INTERNAL_ECC_MASK GENMASK(1, 0)
#define MICRON_ID_ECC_ENABLED BIT(7)
/*
* Try to detect if the NAND supports on-die ECC. To do this, we enable
* the feature, and read back if it has been enabled as expected. We
* also check if it can be disabled, because some Micron NANDs do not
* allow disabling the on-die ECC and we don't support such NANDs for
* now.
*
* This function also has the side effect of disabling on-die ECC if
* it had been left enabled by the firmware/bootloader.
*/
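/*
 * Illustration of the sequence implemented below (derived from the code,
 * not from a datasheet): enable on-die ECC, issue READ_ID and expect bit 7
 * of ID byte 4 to be set; then disable it and issue READ_ID again. If the
 * bit is still set, the ECC engine cannot be turned off and is treated as
 * mandatory.
 */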
static int micron_supports_on_die_ecc(struct nand_chip *chip)
{
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(&chip->base);
u8 id[5];
int ret;
if (!chip->parameters.onfi)
return MICRON_ON_DIE_UNSUPPORTED;
if (nanddev_bits_per_cell(&chip->base) != 1)
return MICRON_ON_DIE_UNSUPPORTED;
/*
* We only support on-die ECC of 4/512 or 8/512
*/
if (requirements->strength != 4 && requirements->strength != 8)
return MICRON_ON_DIE_UNSUPPORTED;
/* 0x2 means on-die ECC is available. */
if (chip->id.len != 5 ||
(chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2)
return MICRON_ON_DIE_UNSUPPORTED;
/*
* It seems that there are devices which do not support ECC officially.
* At least the MT29F2G08ABAGA / MT29F2G08ABBGA devices support
* enabling the ECC feature but don't reflect that in the READ_ID table.
* So we have to guarantee that we disable the ECC feature directly
* after we did the READ_ID table command. Later we can evaluate the
* ECC_ENABLE support.
*/
ret = micron_nand_on_die_ecc_setup(chip, true);
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
ret = nand_readid_op(chip, 0, id, sizeof(id));
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
ret = micron_nand_on_die_ecc_setup(chip, false);
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
if (!(id[4] & MICRON_ID_ECC_ENABLED))
return MICRON_ON_DIE_UNSUPPORTED;
ret = nand_readid_op(chip, 0, id, sizeof(id));
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
if (id[4] & MICRON_ID_ECC_ENABLED)
return MICRON_ON_DIE_MANDATORY;
/*
* We only support on-die ECC of 4/512 or 8/512
*/
if (requirements->strength != 4 && requirements->strength != 8)
return MICRON_ON_DIE_UNSUPPORTED;
return MICRON_ON_DIE_SUPPORTED;
}
static int micron_nand_init(struct nand_chip *chip)
{
struct nand_device *base = &chip->base;
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(base);
struct mtd_info *mtd = nand_to_mtd(chip);
struct micron_nand *micron;
int ondie;
int ret;
micron = kzalloc(sizeof(*micron), GFP_KERNEL);
if (!micron)
return -ENOMEM;
nand_set_manufacturer_data(chip, micron);
ret = micron_nand_onfi_init(chip);
if (ret)
goto err_free_manuf_data;
chip->options |= NAND_BBM_FIRSTPAGE;
if (mtd->writesize == 2048)
chip->options |= NAND_BBM_SECONDPAGE;
ondie = micron_supports_on_die_ecc(chip);
if (ondie == MICRON_ON_DIE_MANDATORY &&
chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_DIE) {
pr_err("On-die ECC forcefully enabled, not supported\n");
ret = -EINVAL;
goto err_free_manuf_data;
}
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE) {
if (ondie == MICRON_ON_DIE_UNSUPPORTED) {
pr_err("On-die ECC selected but not supported\n");
ret = -EINVAL;
goto err_free_manuf_data;
}
if (ondie == MICRON_ON_DIE_MANDATORY) {
micron->ecc.forced = true;
micron->ecc.enabled = true;
}
/*
* In case of 4bit on-die ECC, we need a buffer to store a
* page dumped in raw mode so that we can compare its content
* to the same page after ECC correction happened and extract
* the real number of bitflips from this comparison.
* That's not needed for 8-bit ECC, because the status exposes
* a better approximation of the number of bitflips in a page.
*/
if (requirements->strength == 4) {
micron->ecc.rawbuf = kmalloc(mtd->writesize +
mtd->oobsize,
GFP_KERNEL);
if (!micron->ecc.rawbuf) {
ret = -ENOMEM;
goto err_free_manuf_data;
}
}
if (requirements->strength == 4)
mtd_set_ooblayout(mtd,
µn_nand_on_die_4_ooblayout_ops);
else
mtd_set_ooblayout(mtd,
µn_nand_on_die_8_ooblayout_ops);
chip->ecc.bytes = requirements->strength * 2;
chip->ecc.size = 512;
chip->ecc.strength = requirements->strength;
chip->ecc.algo = NAND_ECC_ALGO_BCH;
chip->ecc.read_page = micron_nand_read_page_on_die_ecc;
chip->ecc.write_page = micron_nand_write_page_on_die_ecc;
if (ondie == MICRON_ON_DIE_MANDATORY) {
chip->ecc.read_page_raw = nand_read_page_raw_notsupp;
chip->ecc.write_page_raw = nand_write_page_raw_notsupp;
} else {
if (!chip->ecc.read_page_raw)
chip->ecc.read_page_raw = nand_read_page_raw;
if (!chip->ecc.write_page_raw)
chip->ecc.write_page_raw = nand_write_page_raw;
}
}
return 0;
err_free_manuf_data:
kfree(micron->ecc.rawbuf);
kfree(micron);
return ret;
}
static void micron_nand_cleanup(struct nand_chip *chip)
{
struct micron_nand *micron = nand_get_manufacturer_data(chip);
kfree(micron->ecc.rawbuf);
kfree(micron);
}
static void micron_fixup_onfi_param_page(struct nand_chip *chip,
struct nand_onfi_params *p)
{
/*
* MT29F1G08ABAFAWP-ITE:F and possibly others report 00 00 for the
* revision number field of the ONFI parameter page. Assume ONFI
* version 1.0 if the revision number is 00 00.
*/
if (le16_to_cpu(p->revision) == 0)
p->revision = cpu_to_le16(ONFI_VERSION_1_0);
}
const struct nand_manufacturer_ops micron_nand_manuf_ops = {
.init = micron_nand_init,
.cleanup = micron_nand_cleanup,
.fixup_onfi_param_page = micron_fixup_onfi_param_page,
};
| linux-master | drivers/mtd/nand/raw/nand_micron.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Free Electrons
* Copyright (C) 2017 NextThing Co
*
* Author: Boris Brezillon <[email protected]>
*/
#include <linux/sizes.h>
#include <linux/slab.h>
#include "internals.h"
#define NAND_HYNIX_CMD_SET_PARAMS 0x36
#define NAND_HYNIX_CMD_APPLY_PARAMS 0x16
#define NAND_HYNIX_1XNM_RR_REPEAT 8
/**
* struct hynix_read_retry - read-retry data
* @nregs: number of registers to set when applying a new read-retry mode
* @regs: register offsets (NAND chip dependent)
* @values: array of values to set in registers. The array size is equal to
* (nregs * nmodes)
*/
struct hynix_read_retry {
int nregs;
const u8 *regs;
u8 values[];
};
/**
* struct hynix_nand - private Hynix NAND struct
* @read_retry: read-retry information
*/
struct hynix_nand {
const struct hynix_read_retry *read_retry;
};
/**
* struct hynix_read_retry_otp - structure describing how to read the
* read-retry OTP area
* @nregs: number of hynix private registers to set before reading
* the OTP area
* @regs: registers that should be configured
* @values: values that should be set in regs
* @page: the address to pass to the READ_PAGE command. Depends on the NAND
* chip
* @size: size of the read-retry OTP section
*/
struct hynix_read_retry_otp {
int nregs;
const u8 *regs;
const u8 *values;
int page;
int size;
};
static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
{
u8 jedecid[5] = { };
int ret;
ret = nand_readid_op(chip, 0x40, jedecid, sizeof(jedecid));
if (ret)
return false;
return !strncmp("JEDEC", jedecid, sizeof(jedecid));
}
static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
{
if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_CMD(cmd, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
chip->legacy.cmdfunc(chip, cmd, -1, -1);
return 0;
}
static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
{
u16 column = ((u16)addr << 8) | addr;
if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_ADDR(1, &addr, 0),
NAND_OP_8BIT_DATA_OUT(1, &val, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
chip->legacy.cmdfunc(chip, NAND_CMD_NONE, column, -1);
chip->legacy.write_byte(chip, val);
return 0;
}
static int hynix_nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
const u8 *values;
int i, ret;
values = hynix->read_retry->values +
(retry_mode * hynix->read_retry->nregs);
/* Enter 'Set Hynix Parameters' mode */
ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
if (ret)
return ret;
/*
* Configure the NAND in the requested read-retry mode.
* This is done by setting pre-defined values in internal NAND
* registers.
*
* The set of registers is NAND specific, and the values are either
* predefined or extracted from an OTP area on the NAND (values are
* probably tweaked at production in this case).
*/
for (i = 0; i < hynix->read_retry->nregs; i++) {
ret = hynix_nand_reg_write_op(chip, hynix->read_retry->regs[i],
values[i]);
if (ret)
return ret;
}
/* Apply the new settings. */
return hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
}
/**
* hynix_get_majority - get the value that is occurring the most in a given
* set of values
* @in: the array of values to test
* @repeat: the size of the in array
* @out: pointer used to store the output value
*
* This function implements the 'majority check' logic that is supposed to
* overcome the unreliability of MLC NANDs when reading the OTP area storing
* the read-retry parameters.
*
* It's based on a pretty simple assumption: if we repeat the same value
* several times and then take the one that is occurring the most, we should
* find the correct value.
* Let's hope this dummy algorithm prevents us from losing the read-retry
* parameters.
*/
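/*
 * Example (illustrative): with repeat = 8, if all eight copies of a
 * read-retry byte agree, the first iteration finds seven matches, which is
 * more than repeat / 2, and that value is returned. With the check as
 * written, a candidate needs to match at least six of the eight copies to
 * be accepted; otherwise the function falls through and returns -EIO.
 */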
static int hynix_get_majority(const u8 *in, int repeat, u8 *out)
{
int i, j, half = repeat / 2;
/*
* We only test the first half of the in array because we must ensure
* that the value is at least occurring repeat / 2 times.
*
* This loop is suboptimal since we may count the occurrences of the
* same value several times, but we are doing that on small sets, which
* makes it acceptable.
*/
for (i = 0; i < half; i++) {
int cnt = 0;
u8 val = in[i];
/* Count all values that are matching the one at index i. */
for (j = i + 1; j < repeat; j++) {
if (in[j] == val)
cnt++;
}
/* We found a value occurring more than repeat / 2. */
if (cnt > half) {
*out = val;
return 0;
}
}
return -EIO;
}
static int hynix_read_rr_otp(struct nand_chip *chip,
const struct hynix_read_retry_otp *info,
void *buf)
{
int i, ret;
ret = nand_reset_op(chip);
if (ret)
return ret;
ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
if (ret)
return ret;
for (i = 0; i < info->nregs; i++) {
ret = hynix_nand_reg_write_op(chip, info->regs[i],
info->values[i]);
if (ret)
return ret;
}
ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
if (ret)
return ret;
/* Sequence to enter OTP mode? */
ret = hynix_nand_cmd_op(chip, 0x17);
if (ret)
return ret;
ret = hynix_nand_cmd_op(chip, 0x4);
if (ret)
return ret;
ret = hynix_nand_cmd_op(chip, 0x19);
if (ret)
return ret;
/* Now read the page */
ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
if (ret)
return ret;
/* Put everything back to normal */
ret = nand_reset_op(chip);
if (ret)
return ret;
ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
if (ret)
return ret;
ret = hynix_nand_reg_write_op(chip, 0x38, 0);
if (ret)
return ret;
ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
if (ret)
return ret;
return nand_read_page_op(chip, 0, 0, NULL, 0);
}
#define NAND_HYNIX_1XNM_RR_COUNT_OFFS 0
#define NAND_HYNIX_1XNM_RR_REG_COUNT_OFFS 8
#define NAND_HYNIX_1XNM_RR_SET_OFFS(x, setsize, inv) \
(16 + ((((x) * 2) + ((inv) ? 1 : 0)) * (setsize)))
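/*
 * Offset example with purely illustrative numbers (assuming nmodes = 8 and
 * nregs = 8, i.e. a set size of 64 bytes): copy 0 of the non-inverted
 * value set starts at byte 16 of the OTP dump, its inverted counterpart at
 * byte 80, and copy 1 of the non-inverted set at byte 144, as computed by
 * NAND_HYNIX_1XNM_RR_SET_OFFS().
 */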
static int hynix_mlc_1xnm_rr_value(const u8 *buf, int nmodes, int nregs,
int mode, int reg, bool inv, u8 *val)
{
u8 tmp[NAND_HYNIX_1XNM_RR_REPEAT];
int val_offs = (mode * nregs) + reg;
int set_size = nmodes * nregs;
int i, ret;
for (i = 0; i < NAND_HYNIX_1XNM_RR_REPEAT; i++) {
int set_offs = NAND_HYNIX_1XNM_RR_SET_OFFS(i, set_size, inv);
tmp[i] = buf[val_offs + set_offs];
}
ret = hynix_get_majority(tmp, NAND_HYNIX_1XNM_RR_REPEAT, val);
if (ret)
return ret;
if (inv)
*val = ~*val;
return 0;
}
static u8 hynix_1xnm_mlc_read_retry_regs[] = {
0xcc, 0xbf, 0xaa, 0xab, 0xcd, 0xad, 0xae, 0xaf
};
static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip,
const struct hynix_read_retry_otp *info)
{
struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
struct hynix_read_retry *rr = NULL;
int ret, i, j;
u8 nregs, nmodes;
u8 *buf;
buf = kmalloc(info->size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = hynix_read_rr_otp(chip, info, buf);
if (ret)
goto out;
ret = hynix_get_majority(buf, NAND_HYNIX_1XNM_RR_REPEAT,
&nmodes);
if (ret)
goto out;
ret = hynix_get_majority(buf + NAND_HYNIX_1XNM_RR_REPEAT,
NAND_HYNIX_1XNM_RR_REPEAT,
&nregs);
if (ret)
goto out;
rr = kzalloc(sizeof(*rr) + (nregs * nmodes), GFP_KERNEL);
if (!rr) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < nmodes; i++) {
for (j = 0; j < nregs; j++) {
u8 *val = rr->values + (i * nregs);
ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
false, val);
if (!ret)
continue;
ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
true, val);
if (ret)
goto out;
}
}
rr->nregs = nregs;
rr->regs = hynix_1xnm_mlc_read_retry_regs;
hynix->read_retry = rr;
chip->ops.setup_read_retry = hynix_nand_setup_read_retry;
chip->read_retries = nmodes;
out:
kfree(buf);
if (ret)
kfree(rr);
return ret;
}
static const u8 hynix_mlc_1xnm_rr_otp_regs[] = { 0x38 };
static const u8 hynix_mlc_1xnm_rr_otp_values[] = { 0x52 };
static const struct hynix_read_retry_otp hynix_mlc_1xnm_rr_otps[] = {
{
.nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
.regs = hynix_mlc_1xnm_rr_otp_regs,
.values = hynix_mlc_1xnm_rr_otp_values,
.page = 0x21f,
.size = 784
},
{
.nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
.regs = hynix_mlc_1xnm_rr_otp_regs,
.values = hynix_mlc_1xnm_rr_otp_values,
.page = 0x200,
.size = 528,
},
};
static int hynix_nand_rr_init(struct nand_chip *chip)
{
int i, ret = 0;
bool valid_jedecid;
valid_jedecid = hynix_nand_has_valid_jedecid(chip);
/*
* We only support read-retry for 1xnm NANDs, and those NANDs all
* expose a valid JEDEC ID.
*/
if (valid_jedecid) {
u8 nand_tech = chip->id.data[5] >> 4;
/* 1xnm technology */
if (nand_tech == 4) {
for (i = 0; i < ARRAY_SIZE(hynix_mlc_1xnm_rr_otps);
i++) {
/*
* FIXME: Hynix recommends copying the
* read-retry OTP area into a normal page.
*/
ret = hynix_mlc_1xnm_rr_init(chip,
hynix_mlc_1xnm_rr_otps);
if (!ret)
break;
}
}
}
if (ret)
pr_warn("failed to initialize read-retry infrastructure\n");
return 0;
}
static void hynix_nand_extract_oobsize(struct nand_chip *chip,
bool valid_jedecid)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
u8 oobsize;
memorg = nanddev_get_memorg(&chip->base);
oobsize = ((chip->id.data[3] >> 2) & 0x3) |
((chip->id.data[3] >> 4) & 0x4);
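/*
 * Illustrative decode with a hypothetical ID byte: for ID[3] = 0x08,
 * bits 3:2 give 0x2 and bit 6 contributes 0, so the expression above
 * yields OOB size code 2, which maps to 1024 OOB bytes on chips with a
 * valid JEDEC ID.
 */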
if (valid_jedecid) {
switch (oobsize) {
case 0:
memorg->oobsize = 2048;
break;
case 1:
memorg->oobsize = 1664;
break;
case 2:
memorg->oobsize = 1024;
break;
case 3:
memorg->oobsize = 640;
break;
default:
/*
* We should never reach this case, but if that
* happens, this probably means Hynix decided to use
* a different extended ID format, and we should find
* a way to support it.
*/
WARN(1, "Invalid OOB size");
break;
}
} else {
switch (oobsize) {
case 0:
memorg->oobsize = 128;
break;
case 1:
memorg->oobsize = 224;
break;
case 2:
memorg->oobsize = 448;
break;
case 3:
memorg->oobsize = 64;
break;
case 4:
memorg->oobsize = 32;
break;
case 5:
memorg->oobsize = 16;
break;
case 6:
memorg->oobsize = 640;
break;
default:
/*
* We should never reach this case, but if that
* happens, this probably means Hynix decided to use
* a different extended ID format, and we should find
* a way to support it.
*/
WARN(1, "Invalid OOB size");
break;
}
/*
* The datasheet of H27UCG8T2BTR mentions that the "Redundant
* Area Size" is encoded "per 8KB" (page size). This chip uses
* a page size of 16KiB. The datasheet mentions an OOB size of
* 1,280 bytes, but the OOB size encoded in the ID bytes (using
* the existing logic above) is 640 bytes.
* Update the OOB size for this chip by taking the value
* determined above and scaling it to the actual page size (so
* the actual OOB size for this chip is: 640 * 16k / 8k).
*/
if (chip->id.data[1] == 0xde)
memorg->oobsize *= memorg->pagesize / SZ_8K;
}
mtd->oobsize = memorg->oobsize;
}
static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
bool valid_jedecid)
{
struct nand_device *base = &chip->base;
struct nand_ecc_props requirements = {};
u8 ecc_level = (chip->id.data[4] >> 4) & 0x7;
if (valid_jedecid) {
/* Reference: H27UCG8T2E datasheet */
requirements.step_size = 1024;
switch (ecc_level) {
case 0:
requirements.step_size = 0;
requirements.strength = 0;
break;
case 1:
requirements.strength = 4;
break;
case 2:
requirements.strength = 24;
break;
case 3:
requirements.strength = 32;
break;
case 4:
requirements.strength = 40;
break;
case 5:
requirements.strength = 50;
break;
case 6:
requirements.strength = 60;
break;
default:
/*
* We should never reach this case, but if that
* happens, this probably means Hynix decided to use
* a different extended ID format, and we should find
* a way to support it.
*/
WARN(1, "Invalid ECC requirements");
}
} else {
/*
* The ECC requirements field meaning depends on the
* NAND technology.
*/
u8 nand_tech = chip->id.data[5] & 0x7;
if (nand_tech < 3) {
/* > 26nm, reference: H27UBG8T2A datasheet */
if (ecc_level < 5) {
requirements.step_size = 512;
requirements.strength = 1 << ecc_level;
} else if (ecc_level < 7) {
if (ecc_level == 5)
requirements.step_size = 2048;
else
requirements.step_size = 1024;
requirements.strength = 24;
} else {
/*
* We should never reach this case, but if that
* happens, this probably means Hynix decided
* to use a different extended ID format, and
* we should find a way to support it.
*/
WARN(1, "Invalid ECC requirements");
}
} else {
/* <= 26nm, reference: H27UBG8T2B datasheet */
if (!ecc_level) {
requirements.step_size = 0;
requirements.strength = 0;
} else if (ecc_level < 5) {
requirements.step_size = 512;
requirements.strength = 1 << (ecc_level - 1);
} else {
requirements.step_size = 1024;
requirements.strength = 24 +
(8 * (ecc_level - 5));
}
}
}
nanddev_set_ecc_requirements(base, &requirements);
}
static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
bool valid_jedecid)
{
u8 nand_tech;
/* We need scrambling on all TLC NANDs */
if (nanddev_bits_per_cell(&chip->base) > 2)
chip->options |= NAND_NEED_SCRAMBLING;
/* And on MLC NANDs with sub-3xnm process */
if (valid_jedecid) {
nand_tech = chip->id.data[5] >> 4;
/* < 3xnm */
if (nand_tech > 0)
chip->options |= NAND_NEED_SCRAMBLING;
} else {
nand_tech = chip->id.data[5] & 0x7;
/* < 32nm */
if (nand_tech > 2)
chip->options |= NAND_NEED_SCRAMBLING;
}
}
static void hynix_nand_decode_id(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
bool valid_jedecid;
u8 tmp;
memorg = nanddev_get_memorg(&chip->base);
/*
* Exclude all SLC NANDs from this advanced detection scheme.
* According to the ranges defined in several datasheets, it might
* appear that even SLC NANDs could fall in this extended ID scheme.
* If that's the case, rework the test to let SLC NANDs go through the
* detection process.
*/
if (chip->id.len < 6 || nand_is_slc(chip)) {
nand_decode_ext_id(chip);
return;
}
/* Extract pagesize */
memorg->pagesize = 2048 << (chip->id.data[3] & 0x03);
mtd->writesize = memorg->pagesize;
tmp = (chip->id.data[3] >> 4) & 0x3;
/*
* When bit7 is set, we start counting at 1MiB, otherwise we start
* counting at 128KiB and shift this value left by the content of
* ID[3][4:5].
* The only exception is when ID[3][4:5] == 3 and ID[3][7] == 0, in
* this case the erasesize is set to 768KiB.
*/
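/*
 * Examples with hypothetical ID bytes: bit7 = 1 and ID[3][4:5] = 1 gives a
 * 2MiB erase block, while bit7 = 0 and ID[3][4:5] = 2 gives
 * 128KiB << 2 = 512KiB.
 */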
if (chip->id.data[3] & 0x80) {
memorg->pages_per_eraseblock = (SZ_1M << tmp) /
memorg->pagesize;
mtd->erasesize = SZ_1M << tmp;
} else if (tmp == 3) {
memorg->pages_per_eraseblock = (SZ_512K + SZ_256K) /
memorg->pagesize;
mtd->erasesize = SZ_512K + SZ_256K;
} else {
memorg->pages_per_eraseblock = (SZ_128K << tmp) /
memorg->pagesize;
mtd->erasesize = SZ_128K << tmp;
}
/*
* Modern Toggle DDR NANDs have a valid JEDECID even though they are
* not exposing a valid JEDEC parameter table.
* These NANDs use a different NAND ID scheme.
*/
valid_jedecid = hynix_nand_has_valid_jedecid(chip);
hynix_nand_extract_oobsize(chip, valid_jedecid);
hynix_nand_extract_ecc_requirements(chip, valid_jedecid);
hynix_nand_extract_scrambling_requirements(chip, valid_jedecid);
}
static void hynix_nand_cleanup(struct nand_chip *chip)
{
struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
if (!hynix)
return;
kfree(hynix->read_retry);
kfree(hynix);
nand_set_manufacturer_data(chip, NULL);
}
static int
h27ucg8t2atrbc_choose_interface_config(struct nand_chip *chip,
struct nand_interface_config *iface)
{
onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 4);
return nand_choose_best_sdr_timings(chip, iface, NULL);
}
static int h27ucg8t2etrbc_init(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
chip->options |= NAND_NEED_SCRAMBLING;
mtd_set_pairing_scheme(mtd, &dist3_pairing_scheme);
return 0;
}
static int hynix_nand_init(struct nand_chip *chip)
{
struct hynix_nand *hynix;
int ret;
if (!nand_is_slc(chip))
chip->options |= NAND_BBM_LASTPAGE;
else
chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
hynix = kzalloc(sizeof(*hynix), GFP_KERNEL);
if (!hynix)
return -ENOMEM;
nand_set_manufacturer_data(chip, hynix);
if (!strncmp("H27UCG8T2ATR-BC", chip->parameters.model,
sizeof("H27UCG8T2ATR-BC") - 1))
chip->ops.choose_interface_config =
h27ucg8t2atrbc_choose_interface_config;
if (!strncmp("H27UCG8T2ETR-BC", chip->parameters.model,
sizeof("H27UCG8T2ETR-BC") - 1))
h27ucg8t2etrbc_init(chip);
ret = hynix_nand_rr_init(chip);
if (ret)
hynix_nand_cleanup(chip);
return ret;
}
static void hynix_fixup_onfi_param_page(struct nand_chip *chip,
struct nand_onfi_params *p)
{
/*
* Certain chips might report a 0 in the sdr_timing_mode field
* (bytes 129-130). This has been seen on H27U4G8F2GDA-BI.
* According to ONFI specification, bit 0 of this field "shall be 1".
* Forcibly set this bit.
*/
p->sdr_timing_modes |= cpu_to_le16(BIT(0));
}
const struct nand_manufacturer_ops hynix_nand_manuf_ops = {
.detect = hynix_nand_decode_id,
.init = hynix_nand_init,
.cleanup = hynix_nand_cleanup,
.fixup_onfi_param_page = hynix_fixup_onfi_param_page,
};
| linux-master | drivers/mtd/nand/raw/nand_hynix.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2006 Jonathan McDowell <[email protected]>
*
* Derived from drivers/mtd/nand/toto.c (removed in v2.6.28)
* Copyright (c) 2003 Texas Instruments
* Copyright (c) 2002 Thomas Gleixner <[email protected]>
*
* Converted to platform driver by Janusz Krzysztofik <[email protected]>
* Partially stolen from plat_nand.c
*
* Overview:
* This is a device driver for the NAND flash device found on the
* Amstrad E3 (Delta).
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand-gpio.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
/*
* MTD structure for E3 (Delta)
*/
struct gpio_nand {
struct nand_controller base;
struct nand_chip nand_chip;
struct gpio_desc *gpiod_rdy;
struct gpio_desc *gpiod_nce;
struct gpio_desc *gpiod_nre;
struct gpio_desc *gpiod_nwp;
struct gpio_desc *gpiod_nwe;
struct gpio_desc *gpiod_ale;
struct gpio_desc *gpiod_cle;
struct gpio_descs *data_gpiods;
bool data_in;
unsigned int tRP;
unsigned int tWP;
u8 (*io_read)(struct gpio_nand *this);
void (*io_write)(struct gpio_nand *this, u8 byte);
};
static void gpio_nand_write_commit(struct gpio_nand *priv)
{
gpiod_set_value(priv->gpiod_nwe, 1);
ndelay(priv->tWP);
gpiod_set_value(priv->gpiod_nwe, 0);
}
static void gpio_nand_io_write(struct gpio_nand *priv, u8 byte)
{
struct gpio_descs *data_gpiods = priv->data_gpiods;
DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
gpiod_set_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
data_gpiods->info, values);
gpio_nand_write_commit(priv);
}
static void gpio_nand_dir_output(struct gpio_nand *priv, u8 byte)
{
struct gpio_descs *data_gpiods = priv->data_gpiods;
DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
int i;
for (i = 0; i < data_gpiods->ndescs; i++)
gpiod_direction_output_raw(data_gpiods->desc[i],
test_bit(i, values));
gpio_nand_write_commit(priv);
priv->data_in = false;
}
static u8 gpio_nand_io_read(struct gpio_nand *priv)
{
u8 res;
struct gpio_descs *data_gpiods = priv->data_gpiods;
DECLARE_BITMAP(values, BITS_PER_TYPE(res)) = { 0, };
gpiod_set_value(priv->gpiod_nre, 1);
ndelay(priv->tRP);
gpiod_get_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
data_gpiods->info, values);
gpiod_set_value(priv->gpiod_nre, 0);
res = values[0];
return res;
}
static void gpio_nand_dir_input(struct gpio_nand *priv)
{
struct gpio_descs *data_gpiods = priv->data_gpiods;
int i;
for (i = 0; i < data_gpiods->ndescs; i++)
gpiod_direction_input(data_gpiods->desc[i]);
priv->data_in = true;
}
static void gpio_nand_write_buf(struct gpio_nand *priv, const u8 *buf, int len)
{
int i = 0;
if (len > 0 && priv->data_in)
gpio_nand_dir_output(priv, buf[i++]);
while (i < len)
priv->io_write(priv, buf[i++]);
}
static void gpio_nand_read_buf(struct gpio_nand *priv, u8 *buf, int len)
{
int i;
if (priv->data_gpiods && !priv->data_in)
gpio_nand_dir_input(priv);
for (i = 0; i < len; i++)
buf[i] = priv->io_read(priv);
}
static void gpio_nand_ctrl_cs(struct gpio_nand *priv, bool assert)
{
gpiod_set_value(priv->gpiod_nce, assert);
}
static int gpio_nand_exec_op(struct nand_chip *this,
const struct nand_operation *op, bool check_only)
{
struct gpio_nand *priv = nand_get_controller_data(this);
const struct nand_op_instr *instr;
int ret = 0;
if (check_only)
return 0;
gpio_nand_ctrl_cs(priv, 1);
for (instr = op->instrs; instr < op->instrs + op->ninstrs; instr++) {
switch (instr->type) {
case NAND_OP_CMD_INSTR:
gpiod_set_value(priv->gpiod_cle, 1);
gpio_nand_write_buf(priv, &instr->ctx.cmd.opcode, 1);
gpiod_set_value(priv->gpiod_cle, 0);
break;
case NAND_OP_ADDR_INSTR:
gpiod_set_value(priv->gpiod_ale, 1);
gpio_nand_write_buf(priv, instr->ctx.addr.addrs,
instr->ctx.addr.naddrs);
gpiod_set_value(priv->gpiod_ale, 0);
break;
case NAND_OP_DATA_IN_INSTR:
gpio_nand_read_buf(priv, instr->ctx.data.buf.in,
instr->ctx.data.len);
break;
case NAND_OP_DATA_OUT_INSTR:
gpio_nand_write_buf(priv, instr->ctx.data.buf.out,
instr->ctx.data.len);
break;
case NAND_OP_WAITRDY_INSTR:
ret = priv->gpiod_rdy ?
nand_gpio_waitrdy(this, priv->gpiod_rdy,
instr->ctx.waitrdy.timeout_ms) :
nand_soft_waitrdy(this,
instr->ctx.waitrdy.timeout_ms);
break;
}
if (ret)
break;
}
gpio_nand_ctrl_cs(priv, 0);
return ret;
}
static int gpio_nand_setup_interface(struct nand_chip *this, int csline,
const struct nand_interface_config *cf)
{
struct gpio_nand *priv = nand_get_controller_data(this);
const struct nand_sdr_timings *sdr = nand_get_sdr_timings(cf);
struct device *dev = &nand_to_mtd(this)->dev;
if (IS_ERR(sdr))
return PTR_ERR(sdr);
if (csline == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
if (priv->gpiod_nre) {
priv->tRP = DIV_ROUND_UP(sdr->tRP_min, 1000);
dev_dbg(dev, "using %u ns read pulse width\n", priv->tRP);
}
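/*
 * The core hands over SDR timings in picoseconds; DIV_ROUND_UP(x, 1000)
 * turns them into the nanosecond pulse widths later used with ndelay().
 * For example (illustrative figure), a tWP_min of 17500 ps rounds up to an
 * 18 ns write pulse.
 */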
priv->tWP = DIV_ROUND_UP(sdr->tWP_min, 1000);
dev_dbg(dev, "using %u ns write pulse width\n", priv->tWP);
return 0;
}
static int gpio_nand_attach_chip(struct nand_chip *chip)
{
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
static const struct nand_controller_ops gpio_nand_ops = {
.exec_op = gpio_nand_exec_op,
.attach_chip = gpio_nand_attach_chip,
.setup_interface = gpio_nand_setup_interface,
};
/*
* Main initialization routine
*/
static int gpio_nand_probe(struct platform_device *pdev)
{
struct gpio_nand_platdata *pdata = dev_get_platdata(&pdev->dev);
const struct mtd_partition *partitions = NULL;
int num_partitions = 0;
struct gpio_nand *priv;
struct nand_chip *this;
struct mtd_info *mtd;
int (*probe)(struct platform_device *pdev, struct gpio_nand *priv);
int err = 0;
if (pdata) {
partitions = pdata->parts;
num_partitions = pdata->num_parts;
}
/* Allocate memory for MTD device structure and private data */
priv = devm_kzalloc(&pdev->dev, sizeof(struct gpio_nand),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
this = &priv->nand_chip;
mtd = nand_to_mtd(this);
mtd->dev.parent = &pdev->dev;
nand_set_controller_data(this, priv);
nand_set_flash_node(this, pdev->dev.of_node);
priv->gpiod_rdy = devm_gpiod_get_optional(&pdev->dev, "rdy", GPIOD_IN);
if (IS_ERR(priv->gpiod_rdy)) {
err = PTR_ERR(priv->gpiod_rdy);
dev_warn(&pdev->dev, "RDY GPIO request failed (%d)\n", err);
return err;
}
platform_set_drvdata(pdev, priv);
/* Set chip enabled but write protected */
priv->gpiod_nwp = devm_gpiod_get_optional(&pdev->dev, "nwp",
GPIOD_OUT_HIGH);
if (IS_ERR(priv->gpiod_nwp)) {
err = PTR_ERR(priv->gpiod_nwp);
dev_err(&pdev->dev, "NWP GPIO request failed (%d)\n", err);
return err;
}
priv->gpiod_nce = devm_gpiod_get_optional(&pdev->dev, "nce",
GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_nce)) {
err = PTR_ERR(priv->gpiod_nce);
dev_err(&pdev->dev, "NCE GPIO request failed (%d)\n", err);
return err;
}
priv->gpiod_nre = devm_gpiod_get_optional(&pdev->dev, "nre",
GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_nre)) {
err = PTR_ERR(priv->gpiod_nre);
dev_err(&pdev->dev, "NRE GPIO request failed (%d)\n", err);
return err;
}
priv->gpiod_nwe = devm_gpiod_get_optional(&pdev->dev, "nwe",
GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_nwe)) {
err = PTR_ERR(priv->gpiod_nwe);
dev_err(&pdev->dev, "NWE GPIO request failed (%d)\n", err);
return err;
}
priv->gpiod_ale = devm_gpiod_get(&pdev->dev, "ale", GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_ale)) {
err = PTR_ERR(priv->gpiod_ale);
dev_err(&pdev->dev, "ALE GPIO request failed (%d)\n", err);
return err;
}
priv->gpiod_cle = devm_gpiod_get(&pdev->dev, "cle", GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_cle)) {
err = PTR_ERR(priv->gpiod_cle);
dev_err(&pdev->dev, "CLE GPIO request failed (%d)\n", err);
return err;
}
/* Request array of data pins, initialize them as input */
priv->data_gpiods = devm_gpiod_get_array_optional(&pdev->dev, "data",
GPIOD_IN);
if (IS_ERR(priv->data_gpiods)) {
err = PTR_ERR(priv->data_gpiods);
dev_err(&pdev->dev, "data GPIO request failed: %d\n", err);
return err;
}
if (priv->data_gpiods) {
if (!priv->gpiod_nwe) {
dev_err(&pdev->dev,
"mandatory NWE pin not provided by platform\n");
return -ENODEV;
}
priv->io_read = gpio_nand_io_read;
priv->io_write = gpio_nand_io_write;
priv->data_in = true;
}
if (pdev->id_entry)
probe = (void *) pdev->id_entry->driver_data;
else
probe = of_device_get_match_data(&pdev->dev);
if (probe)
err = probe(pdev, priv);
if (err)
return err;
if (!priv->io_read || !priv->io_write) {
dev_err(&pdev->dev, "incomplete device configuration\n");
return -ENODEV;
}
/* Initialize the NAND controller object embedded in gpio_nand. */
priv->base.ops = &gpio_nand_ops;
nand_controller_init(&priv->base);
this->controller = &priv->base;
/*
* FIXME: We should release write protection only after nand_scan() to
* be on the safe side but we can't do that until we have a generic way
* to assert/deassert WP from the core. Even if the core shouldn't
* write things in the nand_scan() path, it should have control on this
* pin just in case we ever need to disable write protection during
* chip detection/initialization.
*/
/* Release write protection */
gpiod_set_value(priv->gpiod_nwp, 0);
/*
* This driver assumes that the default ECC engine should be TYPE_SOFT.
* Set ->engine_type before registering the NAND devices in order to
* provide a driver specific default value.
*/
this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
/* Scan to find existence of the device */
err = nand_scan(this, 1);
if (err)
return err;
/* Register the partitions */
err = mtd_device_register(mtd, partitions, num_partitions);
if (err)
goto err_nand_cleanup;
return 0;
err_nand_cleanup:
nand_cleanup(this);
return err;
}
/*
* Clean up routine
*/
static void gpio_nand_remove(struct platform_device *pdev)
{
struct gpio_nand *priv = platform_get_drvdata(pdev);
struct mtd_info *mtd = nand_to_mtd(&priv->nand_chip);
int ret;
/* Apply write protection */
gpiod_set_value(priv->gpiod_nwp, 1);
/* Unregister device */
ret = mtd_device_unregister(mtd);
WARN_ON(ret);
nand_cleanup(mtd_to_nand(mtd));
}
#ifdef CONFIG_OF
static const struct of_device_id gpio_nand_of_id_table[] = {
{
/* sentinel */
},
};
MODULE_DEVICE_TABLE(of, gpio_nand_of_id_table);
#endif
static const struct platform_device_id gpio_nand_plat_id_table[] = {
{
.name = "ams-delta-nand",
}, {
/* sentinel */
},
};
MODULE_DEVICE_TABLE(platform, gpio_nand_plat_id_table);
static struct platform_driver gpio_nand_driver = {
.probe = gpio_nand_probe,
.remove_new = gpio_nand_remove,
.id_table = gpio_nand_plat_id_table,
.driver = {
.name = "ams-delta-nand",
.of_match_table = of_match_ptr(gpio_nand_of_id_table),
},
};
module_platform_driver(gpio_nand_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jonathan McDowell <[email protected]>");
MODULE_DESCRIPTION("Glue layer for NAND flash on Amstrad E3 (Delta)");
| linux-master | drivers/mtd/nand/raw/ams-delta.c |
/*
* NAND support for Marvell Orion SoC platforms
*
* Tzachi Perelstein <[email protected]>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/platform_data/mtd-orion_nand.h>
struct orion_nand_info {
struct nand_controller controller;
struct nand_chip chip;
struct clk *clk;
};
static void orion_nand_cmd_ctrl(struct nand_chip *nc, int cmd,
unsigned int ctrl)
{
struct orion_nand_data *board = nand_get_controller_data(nc);
u32 offs;
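/*
 * Address decoding example (using the DT defaults below of cle = 0 and
 * ale = 1): command bytes land at IO_ADDR_W + (1 << 0) = +0x1 and address
 * bytes at IO_ADDR_W + (1 << 1) = +0x2; on a 16-bit bus both offsets are
 * doubled.
 */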
if (cmd == NAND_CMD_NONE)
return;
if (ctrl & NAND_CLE)
offs = (1 << board->cle);
else if (ctrl & NAND_ALE)
offs = (1 << board->ale);
else
return;
if (nc->options & NAND_BUSWIDTH_16)
offs <<= 1;
writeb(cmd, nc->legacy.IO_ADDR_W + offs);
}
static void orion_nand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
void __iomem *io_base = chip->legacy.IO_ADDR_R;
#if defined(__LINUX_ARM_ARCH__) && __LINUX_ARM_ARCH__ >= 5
uint64_t *buf64;
#endif
int i = 0;
while (len && (unsigned long)buf & 7) {
*buf++ = readb(io_base);
len--;
}
#if defined(__LINUX_ARM_ARCH__) && __LINUX_ARM_ARCH__ >= 5
buf64 = (uint64_t *)buf;
while (i < len/8) {
/*
* Since GCC has no proper constraint (PR 43518)
* force x variable to r2/r3 registers as ldrd instruction
* requires first register to be even.
*/
register uint64_t x asm ("r2");
asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base));
buf64[i++] = x;
}
i *= 8;
#else
readsl(io_base, buf, len/4);
i = len / 4 * 4;
#endif
while (i < len)
buf[i++] = readb(io_base);
}
static int orion_nand_attach_chip(struct nand_chip *chip)
{
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
static const struct nand_controller_ops orion_nand_ops = {
.attach_chip = orion_nand_attach_chip,
};
static int __init orion_nand_probe(struct platform_device *pdev)
{
struct orion_nand_info *info;
struct mtd_info *mtd;
struct nand_chip *nc;
struct orion_nand_data *board;
void __iomem *io_base;
int ret = 0;
u32 val = 0;
info = devm_kzalloc(&pdev->dev,
sizeof(struct orion_nand_info),
GFP_KERNEL);
if (!info)
return -ENOMEM;
nc = &info->chip;
mtd = nand_to_mtd(nc);
nand_controller_init(&info->controller);
info->controller.ops = &orion_nand_ops;
nc->controller = &info->controller;
io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
if (pdev->dev.of_node) {
board = devm_kzalloc(&pdev->dev, sizeof(struct orion_nand_data),
GFP_KERNEL);
if (!board)
return -ENOMEM;
if (!of_property_read_u32(pdev->dev.of_node, "cle", &val))
board->cle = (u8)val;
else
board->cle = 0;
if (!of_property_read_u32(pdev->dev.of_node, "ale", &val))
board->ale = (u8)val;
else
board->ale = 1;
if (!of_property_read_u32(pdev->dev.of_node,
"bank-width", &val))
board->width = (u8)val * 8;
else
board->width = 8;
if (!of_property_read_u32(pdev->dev.of_node,
"chip-delay", &val))
board->chip_delay = (u8)val;
} else {
board = dev_get_platdata(&pdev->dev);
}
mtd->dev.parent = &pdev->dev;
nand_set_controller_data(nc, board);
nand_set_flash_node(nc, pdev->dev.of_node);
nc->legacy.IO_ADDR_R = nc->legacy.IO_ADDR_W = io_base;
nc->legacy.cmd_ctrl = orion_nand_cmd_ctrl;
nc->legacy.read_buf = orion_nand_read_buf;
if (board->chip_delay)
nc->legacy.chip_delay = board->chip_delay;
WARN(board->width > 16,
"%d bit bus width out of range",
board->width);
if (board->width == 16)
nc->options |= NAND_BUSWIDTH_16;
platform_set_drvdata(pdev, info);
/* Not all platforms can gate the clock, so it is optional. */
info->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
if (IS_ERR(info->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(info->clk),
"failed to get and enable clock!\n");
/*
* This driver assumes that the default ECC engine should be TYPE_SOFT.
* Set ->engine_type before registering the NAND devices in order to
* provide a driver specific default value.
*/
nc->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
ret = nand_scan(nc, 1);
if (ret)
return ret;
mtd->name = "orion_nand";
ret = mtd_device_register(mtd, board->parts, board->nr_parts);
if (ret)
nand_cleanup(nc);
return ret;
}
static void orion_nand_remove(struct platform_device *pdev)
{
struct orion_nand_info *info = platform_get_drvdata(pdev);
struct nand_chip *chip = &info->chip;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
}
#ifdef CONFIG_OF
static const struct of_device_id orion_nand_of_match_table[] = {
{ .compatible = "marvell,orion-nand", },
{},
};
MODULE_DEVICE_TABLE(of, orion_nand_of_match_table);
#endif
static struct platform_driver orion_nand_driver = {
.remove_new = orion_nand_remove,
.driver = {
.name = "orion_nand",
.of_match_table = of_match_ptr(orion_nand_of_match_table),
},
};
module_platform_driver_probe(orion_nand_driver, orion_nand_probe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tzachi Perelstein");
MODULE_DESCRIPTION("NAND glue for Orion platforms");
MODULE_ALIAS("platform:orion_nand");
| linux-master | drivers/mtd/nand/raw/orion_nand.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Updated, and converted to generic GPIO based driver by Russell King.
*
* Written by Ben Dooks <[email protected]>
* Based on 2.4 version by Mark Whittaker
*
* © 2004 Simtec Electronics
*
* Device driver for NAND flash that uses a memory mapped interface to
* read/write the NAND commands and data, and GPIO pins for control signals
* (the DT binding refers to this as "GPIO assisted NAND flash")
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand-gpio.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>
struct gpiomtd {
struct nand_controller base;
void __iomem *io;
void __iomem *io_sync;
struct nand_chip nand_chip;
struct gpio_nand_platdata plat;
struct gpio_desc *nce; /* Optional chip enable */
struct gpio_desc *cle;
struct gpio_desc *ale;
struct gpio_desc *rdy;
struct gpio_desc *nwp; /* Optional write protection */
};
static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd)
{
return container_of(mtd_to_nand(mtd), struct gpiomtd, nand_chip);
}
#ifdef CONFIG_ARM
/* gpio_nand_dosync()
*
* Make sure the GPIO state changes occur in-order with writes to NAND
* memory region.
* Needed on PXA due to bus-reordering within the SoC itself (see section on
* I/O ordering in the PXA manual, section 2.3, p35).
*/
static void gpio_nand_dosync(struct gpiomtd *gpiomtd)
{
unsigned long tmp;
if (gpiomtd->io_sync) {
/*
* Linux memory barriers don't cater for what's required here.
* What's required is what's here - a read from a separate
* region with a dependency on that read.
*/
tmp = readl(gpiomtd->io_sync);
asm volatile("mov %1, %0\n" : "=r" (tmp) : "r" (tmp));
}
}
#else
static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
#endif
static int gpio_nand_exec_instr(struct nand_chip *chip,
const struct nand_op_instr *instr)
{
struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
unsigned int i;
switch (instr->type) {
case NAND_OP_CMD_INSTR:
gpio_nand_dosync(gpiomtd);
gpiod_set_value(gpiomtd->cle, 1);
gpio_nand_dosync(gpiomtd);
writeb(instr->ctx.cmd.opcode, gpiomtd->io);
gpio_nand_dosync(gpiomtd);
gpiod_set_value(gpiomtd->cle, 0);
return 0;
case NAND_OP_ADDR_INSTR:
gpio_nand_dosync(gpiomtd);
gpiod_set_value(gpiomtd->ale, 1);
gpio_nand_dosync(gpiomtd);
for (i = 0; i < instr->ctx.addr.naddrs; i++)
writeb(instr->ctx.addr.addrs[i], gpiomtd->io);
gpio_nand_dosync(gpiomtd);
gpiod_set_value(gpiomtd->ale, 0);
return 0;
case NAND_OP_DATA_IN_INSTR:
gpio_nand_dosync(gpiomtd);
if ((chip->options & NAND_BUSWIDTH_16) &&
!instr->ctx.data.force_8bit)
ioread16_rep(gpiomtd->io, instr->ctx.data.buf.in,
instr->ctx.data.len / 2);
else
ioread8_rep(gpiomtd->io, instr->ctx.data.buf.in,
instr->ctx.data.len);
return 0;
case NAND_OP_DATA_OUT_INSTR:
gpio_nand_dosync(gpiomtd);
if ((chip->options & NAND_BUSWIDTH_16) &&
!instr->ctx.data.force_8bit)
iowrite16_rep(gpiomtd->io, instr->ctx.data.buf.out,
instr->ctx.data.len / 2);
else
iowrite8_rep(gpiomtd->io, instr->ctx.data.buf.out,
instr->ctx.data.len);
return 0;
case NAND_OP_WAITRDY_INSTR:
if (!gpiomtd->rdy)
return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
return nand_gpio_waitrdy(chip, gpiomtd->rdy,
instr->ctx.waitrdy.timeout_ms);
default:
return -EINVAL;
}
return 0;
}
static int gpio_nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
unsigned int i;
int ret = 0;
if (check_only)
return 0;
gpio_nand_dosync(gpiomtd);
gpiod_set_value(gpiomtd->nce, 0);
for (i = 0; i < op->ninstrs; i++) {
ret = gpio_nand_exec_instr(chip, &op->instrs[i]);
if (ret)
break;
if (op->instrs[i].delay_ns)
ndelay(op->instrs[i].delay_ns);
}
gpio_nand_dosync(gpiomtd);
gpiod_set_value(gpiomtd->nce, 1);
return ret;
}
static int gpio_nand_attach_chip(struct nand_chip *chip)
{
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
static const struct nand_controller_ops gpio_nand_ops = {
.exec_op = gpio_nand_exec_op,
.attach_chip = gpio_nand_attach_chip,
};
#ifdef CONFIG_OF
static const struct of_device_id gpio_nand_id_table[] = {
{ .compatible = "gpio-control-nand" },
{}
};
MODULE_DEVICE_TABLE(of, gpio_nand_id_table);
static int gpio_nand_get_config_of(const struct device *dev,
struct gpio_nand_platdata *plat)
{
u32 val;
if (!dev->of_node)
return -ENODEV;
if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
if (val == 2) {
plat->options |= NAND_BUSWIDTH_16;
} else if (val != 1) {
dev_err(dev, "invalid bank-width %u\n", val);
return -EINVAL;
}
}
if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
plat->chip_delay = val;
return 0;
}
static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
{
struct resource *r;
u64 addr;
if (of_property_read_u64(pdev->dev.of_node,
"gpio-control-nand,io-sync-reg", &addr))
return NULL;
r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
if (!r)
return NULL;
r->start = addr;
r->end = r->start + 0x3;
r->flags = IORESOURCE_MEM;
return r;
}
#else /* CONFIG_OF */
static inline int gpio_nand_get_config_of(const struct device *dev,
struct gpio_nand_platdata *plat)
{
return -ENOSYS;
}
static inline struct resource *
gpio_nand_get_io_sync_of(struct platform_device *pdev)
{
return NULL;
}
#endif /* CONFIG_OF */
static inline int gpio_nand_get_config(const struct device *dev,
struct gpio_nand_platdata *plat)
{
int ret = gpio_nand_get_config_of(dev, plat);
if (!ret)
return ret;
if (dev_get_platdata(dev)) {
memcpy(plat, dev_get_platdata(dev), sizeof(*plat));
return 0;
}
return -EINVAL;
}
static inline struct resource *
gpio_nand_get_io_sync(struct platform_device *pdev)
{
struct resource *r = gpio_nand_get_io_sync_of(pdev);
if (r)
return r;
return platform_get_resource(pdev, IORESOURCE_MEM, 1);
}
static void gpio_nand_remove(struct platform_device *pdev)
{
struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);
struct nand_chip *chip = &gpiomtd->nand_chip;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
/* Enable write protection and disable the chip */
if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
gpiod_set_value(gpiomtd->nwp, 0);
if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
gpiod_set_value(gpiomtd->nce, 0);
}
static int gpio_nand_probe(struct platform_device *pdev)
{
struct gpiomtd *gpiomtd;
struct nand_chip *chip;
struct mtd_info *mtd;
struct resource *res;
struct device *dev = &pdev->dev;
int ret = 0;
if (!dev->of_node && !dev_get_platdata(dev))
return -EINVAL;
gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL);
if (!gpiomtd)
return -ENOMEM;
chip = &gpiomtd->nand_chip;
gpiomtd->io = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpiomtd->io))
return PTR_ERR(gpiomtd->io);
res = gpio_nand_get_io_sync(pdev);
if (res) {
gpiomtd->io_sync = devm_ioremap_resource(dev, res);
if (IS_ERR(gpiomtd->io_sync))
return PTR_ERR(gpiomtd->io_sync);
}
ret = gpio_nand_get_config(dev, &gpiomtd->plat);
if (ret)
return ret;
/* Just enable the chip */
gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
if (IS_ERR(gpiomtd->nce))
return PTR_ERR(gpiomtd->nce);
/* We disable write protection once we know probe() will succeed */
gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW);
if (IS_ERR(gpiomtd->nwp)) {
ret = PTR_ERR(gpiomtd->nwp);
goto out_ce;
}
gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
if (IS_ERR(gpiomtd->ale)) {
ret = PTR_ERR(gpiomtd->ale);
goto out_ce;
}
gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
if (IS_ERR(gpiomtd->cle)) {
ret = PTR_ERR(gpiomtd->cle);
goto out_ce;
}
gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN);
if (IS_ERR(gpiomtd->rdy)) {
ret = PTR_ERR(gpiomtd->rdy);
goto out_ce;
}
nand_controller_init(&gpiomtd->base);
gpiomtd->base.ops = &gpio_nand_ops;
nand_set_flash_node(chip, pdev->dev.of_node);
chip->options = gpiomtd->plat.options;
chip->controller = &gpiomtd->base;
mtd = nand_to_mtd(chip);
mtd->dev.parent = dev;
platform_set_drvdata(pdev, gpiomtd);
/* Disable write protection, if wired up */
if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
gpiod_direction_output(gpiomtd->nwp, 1);
/*
* This driver assumes that the default ECC engine should be TYPE_SOFT.
* Set ->engine_type before registering the NAND devices in order to
* provide a driver specific default value.
*/
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
ret = nand_scan(chip, 1);
if (ret)
goto err_wp;
if (gpiomtd->plat.adjust_parts)
gpiomtd->plat.adjust_parts(&gpiomtd->plat, mtd->size);
ret = mtd_device_register(mtd, gpiomtd->plat.parts,
gpiomtd->plat.num_parts);
if (!ret)
return 0;
err_wp:
if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
gpiod_set_value(gpiomtd->nwp, 0);
out_ce:
if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
gpiod_set_value(gpiomtd->nce, 0);
return ret;
}
static struct platform_driver gpio_nand_driver = {
.probe = gpio_nand_probe,
.remove_new = gpio_nand_remove,
.driver = {
.name = "gpio-nand",
.of_match_table = of_match_ptr(gpio_nand_id_table),
},
};
module_platform_driver(gpio_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <[email protected]>");
MODULE_DESCRIPTION("GPIO NAND Driver");
| linux-master | drivers/mtd/nand/raw/gpio.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004 Embedded Edge, LLC
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1550nd.h>
struct au1550nd_ctx {
struct nand_controller controller;
struct nand_chip chip;
int cs;
void __iomem *base;
};
static struct au1550nd_ctx *chip_to_au_ctx(struct nand_chip *this)
{
return container_of(this, struct au1550nd_ctx, chip);
}
/**
* au_write_buf - write buffer to chip
* @this: NAND chip object
* @buf: data buffer
* @len: number of bytes to write
*
* write function for 8bit buswidth
*/
static void au_write_buf(struct nand_chip *this, const void *buf,
unsigned int len)
{
struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
const u8 *p = buf;
int i;
for (i = 0; i < len; i++) {
writeb(p[i], ctx->base + MEM_STNAND_DATA);
wmb(); /* drain writebuffer */
}
}
/**
* au_read_buf - read chip data into buffer
* @this: NAND chip object
 * @buf: buffer to store data
* @len: number of bytes to read
*
* read function for 8bit buswidth
*/
static void au_read_buf(struct nand_chip *this, void *buf,
unsigned int len)
{
struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
u8 *p = buf;
int i;
for (i = 0; i < len; i++) {
p[i] = readb(ctx->base + MEM_STNAND_DATA);
wmb(); /* drain writebuffer */
}
}
/**
* au_write_buf16 - write buffer to chip
* @this: NAND chip object
* @buf: data buffer
* @len: number of bytes to write
*
* write function for 16bit buswidth
*/
static void au_write_buf16(struct nand_chip *this, const void *buf,
unsigned int len)
{
struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
const u16 *p = buf;
unsigned int i;
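	/* The bus is 16 bits wide: transfer the buffer as len/2 halfword accesses. */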
len >>= 1;
for (i = 0; i < len; i++) {
writew(p[i], ctx->base + MEM_STNAND_DATA);
wmb(); /* drain writebuffer */
}
}
/**
* au_read_buf16 - read chip data into buffer
* @this: NAND chip object
 * @buf: buffer to store data
* @len: number of bytes to read
*
* read function for 16bit buswidth
*/
static void au_read_buf16(struct nand_chip *this, void *buf, unsigned int len)
{
struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
unsigned int i;
u16 *p = buf;
len >>= 1;
for (i = 0; i < len; i++) {
p[i] = readw(ctx->base + MEM_STNAND_DATA);
wmb(); /* drain writebuffer */
}
}
static int find_nand_cs(unsigned long nand_base)
{
void __iomem *base =
(void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
unsigned long addr, staddr, start, mask, end;
int i;
for (i = 0; i < 4; i++) {
addr = 0x1000 + (i * 0x10); /* CSx */
staddr = __raw_readl(base + addr + 0x08); /* STADDRx */
/* figure out the decoded range of this CS */
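		/*
		 * STADDRx packs both the base address and the address mask of
		 * CSx; the arithmetic below rebuilds the first and last
		 * decoded address so nand_base can be matched against the
		 * decoded range.
		 */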
start = (staddr << 4) & 0xfffc0000;
mask = (staddr << 18) & 0xfffc0000;
end = (start | (start - 1)) & ~(start ^ mask);
if ((nand_base >= start) && (nand_base < end))
return i;
}
return -ENODEV;
}
static int au1550nd_waitrdy(struct nand_chip *this, unsigned int timeout_ms)
{
unsigned long timeout_jiffies = jiffies;
timeout_jiffies += msecs_to_jiffies(timeout_ms) + 1;
do {
if (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1)
return 0;
usleep_range(10, 100);
} while (time_before(jiffies, timeout_jiffies));
return -ETIMEDOUT;
}
static int au1550nd_exec_instr(struct nand_chip *this,
const struct nand_op_instr *instr)
{
struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
unsigned int i;
int ret = 0;
switch (instr->type) {
case NAND_OP_CMD_INSTR:
writeb(instr->ctx.cmd.opcode,
ctx->base + MEM_STNAND_CMD);
/* Drain the writebuffer */
wmb();
break;
case NAND_OP_ADDR_INSTR:
for (i = 0; i < instr->ctx.addr.naddrs; i++) {
writeb(instr->ctx.addr.addrs[i],
ctx->base + MEM_STNAND_ADDR);
/* Drain the writebuffer */
wmb();
}
break;
case NAND_OP_DATA_IN_INSTR:
if ((this->options & NAND_BUSWIDTH_16) &&
!instr->ctx.data.force_8bit)
au_read_buf16(this, instr->ctx.data.buf.in,
instr->ctx.data.len);
else
au_read_buf(this, instr->ctx.data.buf.in,
instr->ctx.data.len);
break;
case NAND_OP_DATA_OUT_INSTR:
if ((this->options & NAND_BUSWIDTH_16) &&
!instr->ctx.data.force_8bit)
au_write_buf16(this, instr->ctx.data.buf.out,
instr->ctx.data.len);
else
au_write_buf(this, instr->ctx.data.buf.out,
instr->ctx.data.len);
break;
case NAND_OP_WAITRDY_INSTR:
ret = au1550nd_waitrdy(this, instr->ctx.waitrdy.timeout_ms);
break;
default:
return -EINVAL;
}
if (instr->delay_ns)
ndelay(instr->delay_ns);
return ret;
}
static int au1550nd_exec_op(struct nand_chip *this,
const struct nand_operation *op,
bool check_only)
{
struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
unsigned int i;
int ret;
if (check_only)
return 0;
/* assert (force assert) chip enable */
alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL);
/* Drain the writebuffer */
wmb();
for (i = 0; i < op->ninstrs; i++) {
ret = au1550nd_exec_instr(this, &op->instrs[i]);
if (ret)
break;
}
/* deassert chip enable */
alchemy_wrsmem(0, AU1000_MEM_STNDCTL);
/* Drain the writebuffer */
wmb();
return ret;
}
static int au1550nd_attach_chip(struct nand_chip *chip)
{
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
static const struct nand_controller_ops au1550nd_ops = {
.exec_op = au1550nd_exec_op,
.attach_chip = au1550nd_attach_chip,
};
static int au1550nd_probe(struct platform_device *pdev)
{
struct au1550nd_platdata *pd;
struct au1550nd_ctx *ctx;
struct nand_chip *this;
struct mtd_info *mtd;
struct resource *r;
int ret, cs;
pd = dev_get_platdata(&pdev->dev);
if (!pd) {
dev_err(&pdev->dev, "missing platform data\n");
return -ENODEV;
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
dev_err(&pdev->dev, "no NAND memory resource\n");
ret = -ENODEV;
goto out1;
}
	if (!request_mem_region(r->start, resource_size(r), "au1550-nand")) {
dev_err(&pdev->dev, "cannot claim NAND memory area\n");
ret = -ENOMEM;
goto out1;
}
ctx->base = ioremap(r->start, 0x1000);
if (!ctx->base) {
dev_err(&pdev->dev, "cannot remap NAND memory area\n");
ret = -ENODEV;
goto out2;
}
this = &ctx->chip;
mtd = nand_to_mtd(this);
mtd->dev.parent = &pdev->dev;
/* figure out which CS# r->start belongs to */
cs = find_nand_cs(r->start);
if (cs < 0) {
dev_err(&pdev->dev, "cannot detect NAND chipselect\n");
ret = -ENODEV;
goto out3;
}
ctx->cs = cs;
nand_controller_init(&ctx->controller);
ctx->controller.ops = &au1550nd_ops;
this->controller = &ctx->controller;
if (pd->devwidth)
this->options |= NAND_BUSWIDTH_16;
/*
* This driver assumes that the default ECC engine should be TYPE_SOFT.
* Set ->engine_type before registering the NAND devices in order to
* provide a driver specific default value.
*/
this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
ret = nand_scan(this, 1);
if (ret) {
dev_err(&pdev->dev, "NAND scan failed with %d\n", ret);
goto out3;
}
mtd_device_register(mtd, pd->parts, pd->num_parts);
platform_set_drvdata(pdev, ctx);
return 0;
out3:
iounmap(ctx->base);
out2:
release_mem_region(r->start, resource_size(r));
out1:
kfree(ctx);
return ret;
}
static void au1550nd_remove(struct platform_device *pdev)
{
struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct nand_chip *chip = &ctx->chip;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
iounmap(ctx->base);
release_mem_region(r->start, 0x1000);
kfree(ctx);
}
static struct platform_driver au1550nd_driver = {
.driver = {
.name = "au1550-nand",
},
.probe = au1550nd_probe,
.remove_new = au1550nd_remove,
};
module_platform_driver(au1550nd_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Embedded Edge, LLC");
MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on Pb1550 board");
| linux-master | drivers/mtd/nand/raw/au1550nd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Free Electrons
* Copyright (C) 2017 NextThing Co
*
* Author: Boris Brezillon <[email protected]>
*/
#include "internals.h"
static void samsung_nand_decode_id(struct nand_chip *chip)
{
struct nand_device *base = &chip->base;
struct nand_ecc_props requirements = {};
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
memorg = nanddev_get_memorg(&chip->base);
/* New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44) */
if (chip->id.len == 6 && !nand_is_slc(chip) &&
chip->id.data[5] != 0x00) {
u8 extid = chip->id.data[3];
/* Get pagesize */
memorg->pagesize = 2048 << (extid & 0x03);
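		/* e.g. extid low bits 0b01 -> 2048 << 1 = 4096-byte pages */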
mtd->writesize = memorg->pagesize;
extid >>= 2;
/* Get oobsize */
switch (((extid >> 2) & 0x4) | (extid & 0x3)) {
case 1:
memorg->oobsize = 128;
break;
case 2:
memorg->oobsize = 218;
break;
case 3:
memorg->oobsize = 400;
break;
case 4:
memorg->oobsize = 436;
break;
case 5:
memorg->oobsize = 512;
break;
case 6:
memorg->oobsize = 640;
break;
default:
/*
* We should never reach this case, but if that
* happens, this probably means Samsung decided to use
* a different extended ID format, and we should find
* a way to support it.
*/
WARN(1, "Invalid OOB size value");
break;
}
mtd->oobsize = memorg->oobsize;
/* Get blocksize */
extid >>= 2;
memorg->pages_per_eraseblock = (128 * 1024) <<
(((extid >> 1) & 0x04) |
(extid & 0x03)) /
memorg->pagesize;
mtd->erasesize = (128 * 1024) <<
(((extid >> 1) & 0x04) | (extid & 0x03));
		/* Extract ECC requirements from the 5th ID byte */
extid = (chip->id.data[4] >> 4) & 0x07;
if (extid < 5) {
requirements.step_size = 512;
requirements.strength = 1 << extid;
} else {
requirements.step_size = 1024;
switch (extid) {
case 5:
requirements.strength = 24;
break;
case 6:
requirements.strength = 40;
break;
case 7:
requirements.strength = 60;
break;
default:
WARN(1, "Could not decode ECC info");
requirements.step_size = 0;
}
}
} else {
nand_decode_ext_id(chip);
if (nand_is_slc(chip)) {
switch (chip->id.data[1]) {
/* K9F4G08U0D-S[I|C]B0(T00) */
case 0xDC:
requirements.step_size = 512;
requirements.strength = 1;
break;
/* K9F1G08U0E 21nm chips do not support subpage write */
case 0xF1:
if (chip->id.len > 4 &&
(chip->id.data[4] & GENMASK(1, 0)) == 0x1)
chip->options |= NAND_NO_SUBPAGE_WRITE;
break;
default:
break;
}
}
}
nanddev_set_ecc_requirements(base, &requirements);
}
static int samsung_nand_init(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (mtd->writesize > 512)
chip->options |= NAND_SAMSUNG_LP_OPTIONS;
if (!nand_is_slc(chip))
chip->options |= NAND_BBM_LASTPAGE;
else
chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
return 0;
}
const struct nand_manufacturer_ops samsung_nand_manuf_ops = {
.detect = samsung_nand_decode_id,
.init = samsung_nand_init,
};
| linux-master | drivers/mtd/nand/raw/nand_samsung.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2008 Ilya Yanok, Emcraft Systems
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#define FPGA_NAND_CMD_MASK (0x7 << 28)
#define FPGA_NAND_CMD_COMMAND (0x0 << 28)
#define FPGA_NAND_CMD_ADDR (0x1 << 28)
#define FPGA_NAND_CMD_READ (0x2 << 28)
#define FPGA_NAND_CMD_WRITE (0x3 << 28)
#define FPGA_NAND_BUSY (0x1 << 15)
#define FPGA_NAND_ENABLE (0x1 << 31)
#define FPGA_NAND_DATA_SHIFT 16
struct socrates_nand_host {
struct nand_controller controller;
struct nand_chip nand_chip;
void __iomem *io_base;
struct device *dev;
};
/**
* socrates_nand_write_buf - write buffer to chip
* @this: NAND chip object
* @buf: data buffer
* @len: number of bytes to write
*/
static void socrates_nand_write_buf(struct nand_chip *this, const uint8_t *buf,
int len)
{
int i;
struct socrates_nand_host *host = nand_get_controller_data(this);
for (i = 0; i < len; i++) {
out_be32(host->io_base, FPGA_NAND_ENABLE |
FPGA_NAND_CMD_WRITE |
(buf[i] << FPGA_NAND_DATA_SHIFT));
}
}
/**
* socrates_nand_read_buf - read chip data into buffer
* @this: NAND chip object
 * @buf: buffer to store data
* @len: number of bytes to read
*/
static void socrates_nand_read_buf(struct nand_chip *this, uint8_t *buf,
int len)
{
int i;
struct socrates_nand_host *host = nand_get_controller_data(this);
uint32_t val;
val = FPGA_NAND_ENABLE | FPGA_NAND_CMD_READ;
out_be32(host->io_base, val);
for (i = 0; i < len; i++) {
buf[i] = (in_be32(host->io_base) >>
FPGA_NAND_DATA_SHIFT) & 0xff;
}
}
/**
* socrates_nand_read_byte - read one byte from the chip
 * @this: NAND chip object
*/
static uint8_t socrates_nand_read_byte(struct nand_chip *this)
{
uint8_t byte;
socrates_nand_read_buf(this, &byte, sizeof(byte));
return byte;
}
/*
* Hardware specific access to control-lines
*/
static void socrates_nand_cmd_ctrl(struct nand_chip *nand_chip, int cmd,
unsigned int ctrl)
{
struct socrates_nand_host *host = nand_get_controller_data(nand_chip);
uint32_t val;
if (cmd == NAND_CMD_NONE)
return;
if (ctrl & NAND_CLE)
val = FPGA_NAND_CMD_COMMAND;
else
val = FPGA_NAND_CMD_ADDR;
if (ctrl & NAND_NCE)
val |= FPGA_NAND_ENABLE;
val |= (cmd & 0xff) << FPGA_NAND_DATA_SHIFT;
out_be32(host->io_base, val);
}
/*
* Read the Device Ready pin.
*/
static int socrates_nand_device_ready(struct nand_chip *nand_chip)
{
struct socrates_nand_host *host = nand_get_controller_data(nand_chip);
if (in_be32(host->io_base) & FPGA_NAND_BUSY)
return 0; /* busy */
return 1;
}
static int socrates_attach_chip(struct nand_chip *chip)
{
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
static const struct nand_controller_ops socrates_ops = {
.attach_chip = socrates_attach_chip,
};
/*
* Probe for the NAND device.
*/
static int socrates_nand_probe(struct platform_device *ofdev)
{
struct socrates_nand_host *host;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
int res;
/* Allocate memory for the device structure (and zero it) */
host = devm_kzalloc(&ofdev->dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
host->io_base = of_iomap(ofdev->dev.of_node, 0);
if (host->io_base == NULL) {
dev_err(&ofdev->dev, "ioremap failed\n");
return -EIO;
}
nand_chip = &host->nand_chip;
mtd = nand_to_mtd(nand_chip);
host->dev = &ofdev->dev;
nand_controller_init(&host->controller);
host->controller.ops = &socrates_ops;
nand_chip->controller = &host->controller;
/* link the private data structures */
nand_set_controller_data(nand_chip, host);
nand_set_flash_node(nand_chip, ofdev->dev.of_node);
mtd->name = "socrates_nand";
mtd->dev.parent = &ofdev->dev;
nand_chip->legacy.cmd_ctrl = socrates_nand_cmd_ctrl;
nand_chip->legacy.read_byte = socrates_nand_read_byte;
nand_chip->legacy.write_buf = socrates_nand_write_buf;
nand_chip->legacy.read_buf = socrates_nand_read_buf;
nand_chip->legacy.dev_ready = socrates_nand_device_ready;
/* TODO: I have no idea what real delay is. */
nand_chip->legacy.chip_delay = 20; /* 20us command delay time */
/*
* This driver assumes that the default ECC engine should be TYPE_SOFT.
* Set ->engine_type before registering the NAND devices in order to
* provide a driver specific default value.
*/
nand_chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
dev_set_drvdata(&ofdev->dev, host);
res = nand_scan(nand_chip, 1);
if (res)
goto out;
res = mtd_device_register(mtd, NULL, 0);
if (!res)
return res;
nand_cleanup(nand_chip);
out:
iounmap(host->io_base);
return res;
}
/*
* Remove a NAND device.
*/
static void socrates_nand_remove(struct platform_device *ofdev)
{
struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
struct nand_chip *chip = &host->nand_chip;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
iounmap(host->io_base);
}
static const struct of_device_id socrates_nand_match[] =
{
{
.compatible = "abb,socrates-nand",
},
{},
};
MODULE_DEVICE_TABLE(of, socrates_nand_match);
static struct platform_driver socrates_nand_driver = {
.driver = {
.name = "socrates_nand",
.of_match_table = socrates_nand_match,
},
.probe = socrates_nand_probe,
.remove_new = socrates_nand_remove,
};
module_platform_driver(socrates_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ilya Yanok");
MODULE_DESCRIPTION("NAND driver for Socrates board");
| linux-master | drivers/mtd/nand/raw/socrates_nand.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic NAND driver
*
* Author: Vitaly Wool <[email protected]>
*/
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/platnand.h>
struct plat_nand_data {
struct nand_controller controller;
struct nand_chip chip;
void __iomem *io_base;
};
static int plat_nand_attach_chip(struct nand_chip *chip)
{
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
static const struct nand_controller_ops plat_nand_ops = {
.attach_chip = plat_nand_attach_chip,
};
/*
* Probe for the NAND device.
*/
static int plat_nand_probe(struct platform_device *pdev)
{
struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
struct plat_nand_data *data;
struct mtd_info *mtd;
const char **part_types;
int err = 0;
if (!pdata) {
dev_err(&pdev->dev, "platform_nand_data is missing\n");
return -EINVAL;
}
if (pdata->chip.nr_chips < 1) {
dev_err(&pdev->dev, "invalid number of chips specified\n");
return -EINVAL;
}
/* Allocate memory for the device structure (and zero it) */
data = devm_kzalloc(&pdev->dev, sizeof(struct plat_nand_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
data->controller.ops = &plat_nand_ops;
nand_controller_init(&data->controller);
data->chip.controller = &data->controller;
data->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->io_base))
return PTR_ERR(data->io_base);
nand_set_flash_node(&data->chip, pdev->dev.of_node);
mtd = nand_to_mtd(&data->chip);
mtd->dev.parent = &pdev->dev;
data->chip.legacy.IO_ADDR_R = data->io_base;
data->chip.legacy.IO_ADDR_W = data->io_base;
data->chip.legacy.cmd_ctrl = pdata->ctrl.cmd_ctrl;
data->chip.legacy.dev_ready = pdata->ctrl.dev_ready;
data->chip.legacy.select_chip = pdata->ctrl.select_chip;
data->chip.legacy.write_buf = pdata->ctrl.write_buf;
data->chip.legacy.read_buf = pdata->ctrl.read_buf;
data->chip.legacy.chip_delay = pdata->chip.chip_delay;
data->chip.options |= pdata->chip.options;
data->chip.bbt_options |= pdata->chip.bbt_options;
platform_set_drvdata(pdev, data);
/* Handle any platform specific setup */
if (pdata->ctrl.probe) {
err = pdata->ctrl.probe(pdev);
if (err)
goto out;
}
/*
* This driver assumes that the default ECC engine should be TYPE_SOFT.
* Set ->engine_type before registering the NAND devices in order to
* provide a driver specific default value.
*/
data->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
/* Scan to find existence of the device */
err = nand_scan(&data->chip, pdata->chip.nr_chips);
if (err)
goto out;
part_types = pdata->chip.part_probe_types;
err = mtd_device_parse_register(mtd, part_types, NULL,
pdata->chip.partitions,
pdata->chip.nr_partitions);
if (!err)
return err;
nand_cleanup(&data->chip);
out:
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
return err;
}
/*
* Remove a NAND device.
*/
static void plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
struct nand_chip *chip = &data->chip;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
}
static const struct of_device_id plat_nand_match[] = {
{ .compatible = "gen_nand" },
{},
};
MODULE_DEVICE_TABLE(of, plat_nand_match);
static struct platform_driver plat_nand_driver = {
.probe = plat_nand_probe,
.remove_new = plat_nand_remove,
.driver = {
.name = "gen_nand",
.of_match_table = plat_nand_match,
},
};
module_platform_driver(plat_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool");
MODULE_DESCRIPTION("Simple generic NAND driver");
MODULE_ALIAS("platform:gen_nand");
| linux-master | drivers/mtd/nand/raw/plat_nand.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Free Electrons
* Copyright (C) 2017 NextThing Co
*
* Author: Boris Brezillon <[email protected]>
*/
#include <linux/slab.h>
#include "linux/delay.h"
#include "internals.h"
#define MACRONIX_READ_RETRY_BIT BIT(0)
#define MACRONIX_NUM_READ_RETRY_MODES 6
#define ONFI_FEATURE_ADDR_MXIC_PROTECTION 0xA0
#define MXIC_BLOCK_PROTECTION_ALL_LOCK 0x38
#define MXIC_BLOCK_PROTECTION_ALL_UNLOCK 0x0
#define ONFI_FEATURE_ADDR_MXIC_RANDOMIZER 0xB0
#define MACRONIX_RANDOMIZER_BIT BIT(1)
#define MACRONIX_RANDOMIZER_ENPGM BIT(0)
#define MACRONIX_RANDOMIZER_RANDEN BIT(1)
#define MACRONIX_RANDOMIZER_RANDOPT BIT(2)
#define MACRONIX_RANDOMIZER_MODE_ENTER \
(MACRONIX_RANDOMIZER_ENPGM | \
MACRONIX_RANDOMIZER_RANDEN | \
MACRONIX_RANDOMIZER_RANDOPT)
#define MACRONIX_RANDOMIZER_MODE_EXIT \
(MACRONIX_RANDOMIZER_RANDEN | \
MACRONIX_RANDOMIZER_RANDOPT)
#define MXIC_CMD_POWER_DOWN 0xB9
#define ONFI_FEATURE_ADDR_30LFXG18AC_OTP 0x90
#define MACRONIX_30LFXG18AC_OTP_START_PAGE 2
#define MACRONIX_30LFXG18AC_OTP_PAGES 30
#define MACRONIX_30LFXG18AC_OTP_PAGE_SIZE 2112
#define MACRONIX_30LFXG18AC_OTP_SIZE_BYTES \
(MACRONIX_30LFXG18AC_OTP_PAGES * \
MACRONIX_30LFXG18AC_OTP_PAGE_SIZE)
#define MACRONIX_30LFXG18AC_OTP_EN BIT(0)
struct nand_onfi_vendor_macronix {
u8 reserved;
u8 reliability_func;
} __packed;
static int macronix_nand_setup_read_retry(struct nand_chip *chip, int mode)
{
u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
if (!chip->parameters.supports_set_get_features ||
!test_bit(ONFI_FEATURE_ADDR_READ_RETRY,
chip->parameters.set_feature_list))
return -ENOTSUPP;
feature[0] = mode;
return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
}
static int macronix_nand_randomizer_check_enable(struct nand_chip *chip)
{
u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
int ret;
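	/*
	 * Read the current randomizer state first; if it is already enabled,
	 * report it as-is. Otherwise enter the randomizer programming mode,
	 * commit the RANDEN/RANDOPT OTP bits with a page program operation,
	 * then leave the programming mode again.
	 */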
ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
feature);
if (ret < 0)
return ret;
if (feature[0])
return feature[0];
feature[0] = MACRONIX_RANDOMIZER_MODE_ENTER;
ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
feature);
if (ret < 0)
return ret;
/* RANDEN and RANDOPT OTP bits are programmed */
feature[0] = 0x0;
ret = nand_prog_page_op(chip, 0, 0, feature, 1);
if (ret < 0)
return ret;
ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
feature);
if (ret < 0)
return ret;
feature[0] &= MACRONIX_RANDOMIZER_MODE_EXIT;
ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
feature);
if (ret < 0)
return ret;
return 0;
}
static void macronix_nand_onfi_init(struct nand_chip *chip)
{
struct nand_parameters *p = &chip->parameters;
struct nand_onfi_vendor_macronix *mxic;
struct device_node *dn = nand_get_flash_node(chip);
int rand_otp;
int ret;
if (!p->onfi)
return;
rand_otp = of_property_read_bool(dn, "mxic,enable-randomizer-otp");
mxic = (struct nand_onfi_vendor_macronix *)p->onfi->vendor;
	/* Subpage write is prohibited in randomizer operation */
if (rand_otp && chip->options & NAND_NO_SUBPAGE_WRITE &&
mxic->reliability_func & MACRONIX_RANDOMIZER_BIT) {
if (p->supports_set_get_features) {
bitmap_set(p->set_feature_list,
ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
bitmap_set(p->get_feature_list,
ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
ret = macronix_nand_randomizer_check_enable(chip);
if (ret < 0) {
bitmap_clear(p->set_feature_list,
ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
1);
bitmap_clear(p->get_feature_list,
ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
1);
pr_info("Macronix NAND randomizer failed\n");
} else {
pr_info("Macronix NAND randomizer enabled\n");
}
}
}
if ((mxic->reliability_func & MACRONIX_READ_RETRY_BIT) == 0)
return;
chip->read_retries = MACRONIX_NUM_READ_RETRY_MODES;
chip->ops.setup_read_retry = macronix_nand_setup_read_retry;
if (p->supports_set_get_features) {
bitmap_set(p->set_feature_list,
ONFI_FEATURE_ADDR_READ_RETRY, 1);
bitmap_set(p->get_feature_list,
ONFI_FEATURE_ADDR_READ_RETRY, 1);
}
}
/*
* Macronix AC series does not support using SET/GET_FEATURES to change
* the timings unlike what is declared in the parameter page. Unflag
* this feature to avoid unnecessary downturns.
*/
static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
{
int i;
static const char * const broken_get_timings[] = {
"MX30LF1G18AC",
"MX30LF1G28AC",
"MX30LF2G18AC",
"MX30LF2G28AC",
"MX30LF4G18AC",
"MX30LF4G28AC",
"MX60LF8G18AC",
"MX30UF1G18AC",
"MX30UF1G16AC",
"MX30UF2G18AC",
"MX30UF2G16AC",
"MX30UF4G18AC",
"MX30UF4G16AC",
"MX30UF4G28AC",
};
if (!chip->parameters.supports_set_get_features)
return;
i = match_string(broken_get_timings, ARRAY_SIZE(broken_get_timings),
chip->parameters.model);
if (i < 0)
return;
bitmap_clear(chip->parameters.get_feature_list,
ONFI_FEATURE_ADDR_TIMING_MODE, 1);
bitmap_clear(chip->parameters.set_feature_list,
ONFI_FEATURE_ADDR_TIMING_MODE, 1);
}
/*
 * Macronix NAND supports Block Protection through the Protection (PT) pin;
 * it is active high at power-on and protects the entire chip even when #WP
 * is disabled. The lock/unlock protection area can be partitioned according
 * to the protection bits, i.e. upper 1/2 locked, upper 1/4 locked and so on.
*/
static int mxic_nand_lock(struct nand_chip *chip, loff_t ofs, uint64_t len)
{
u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
int ret;
feature[0] = MXIC_BLOCK_PROTECTION_ALL_LOCK;
nand_select_target(chip, 0);
ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
feature);
nand_deselect_target(chip);
if (ret)
pr_err("%s all blocks failed\n", __func__);
return ret;
}
static int mxic_nand_unlock(struct nand_chip *chip, loff_t ofs, uint64_t len)
{
u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
int ret;
feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
nand_select_target(chip, 0);
ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
feature);
nand_deselect_target(chip);
if (ret)
pr_err("%s all blocks failed\n", __func__);
return ret;
}
static void macronix_nand_block_protection_support(struct nand_chip *chip)
{
u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
int ret;
bitmap_set(chip->parameters.get_feature_list,
ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
nand_select_target(chip, 0);
ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
feature);
nand_deselect_target(chip);
if (ret || feature[0] != MXIC_BLOCK_PROTECTION_ALL_LOCK) {
if (ret)
pr_err("Block protection check failed\n");
bitmap_clear(chip->parameters.get_feature_list,
ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
return;
}
bitmap_set(chip->parameters.set_feature_list,
ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
chip->ops.lock_area = mxic_nand_lock;
chip->ops.unlock_area = mxic_nand_unlock;
}
static int nand_power_down_op(struct nand_chip *chip)
{
int ret;
if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_CMD(MXIC_CMD_POWER_DOWN, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
ret = nand_exec_op(chip, &op);
if (ret)
return ret;
} else {
chip->legacy.cmdfunc(chip, MXIC_CMD_POWER_DOWN, -1, -1);
}
return 0;
}
static int mxic_nand_suspend(struct nand_chip *chip)
{
int ret;
nand_select_target(chip, 0);
ret = nand_power_down_op(chip);
if (ret < 0)
pr_err("Suspending MXIC NAND chip failed (%d)\n", ret);
nand_deselect_target(chip);
return ret;
}
static void mxic_nand_resume(struct nand_chip *chip)
{
/*
	 * Toggle the #CS pin to resume the NAND device, regardless of the
	 * state of the other pins (CLE, #WE, #RE). A NAND controller is
	 * assumed to be able to assert/de-assert #CS by sending any byte
	 * over the NAND bus, e.g. a power-down or reset command without
	 * R/B# status checking.
*/
nand_select_target(chip, 0);
nand_power_down_op(chip);
/* The minimum of a recovery time tRDP is 35 us */
usleep_range(35, 100);
nand_deselect_target(chip);
}
static void macronix_nand_deep_power_down_support(struct nand_chip *chip)
{
int i;
static const char * const deep_power_down_dev[] = {
"MX30UF1G28AD",
"MX30UF2G28AD",
"MX30UF4G28AD",
};
i = match_string(deep_power_down_dev, ARRAY_SIZE(deep_power_down_dev),
chip->parameters.model);
if (i < 0)
return;
chip->ops.suspend = mxic_nand_suspend;
chip->ops.resume = mxic_nand_resume;
}
static int macronix_30lfxg18ac_get_otp_info(struct mtd_info *mtd, size_t len,
size_t *retlen,
struct otp_info *buf)
{
if (len < sizeof(*buf))
return -EINVAL;
	/* Always report that OTP is unlocked. The reason is that this
	 * type of flash chip doesn't provide a way to check whether OTP
	 * is locked or not: the subfeature parameter is implemented as a
	 * volatile register. Technically the OTP region could be locked
	 * and become read-only, but as there is no way to check it,
	 * don't allow locking it (the '_lock_user_prot_reg' callback
	 * always returns -EOPNOTSUPP) and thus report that OTP is
	 * unlocked.
*/
buf->locked = 0;
buf->start = 0;
buf->length = MACRONIX_30LFXG18AC_OTP_SIZE_BYTES;
*retlen = sizeof(*buf);
return 0;
}
static int macronix_30lfxg18ac_otp_enable(struct nand_chip *nand)
{
u8 feature_buf[ONFI_SUBFEATURE_PARAM_LEN] = { 0 };
feature_buf[0] = MACRONIX_30LFXG18AC_OTP_EN;
return nand_set_features(nand, ONFI_FEATURE_ADDR_30LFXG18AC_OTP,
feature_buf);
}
static int macronix_30lfxg18ac_otp_disable(struct nand_chip *nand)
{
u8 feature_buf[ONFI_SUBFEATURE_PARAM_LEN] = { 0 };
return nand_set_features(nand, ONFI_FEATURE_ADDR_30LFXG18AC_OTP,
feature_buf);
}
static int __macronix_30lfxg18ac_rw_otp(struct mtd_info *mtd,
loff_t offs_in_flash,
size_t len, size_t *retlen,
u_char *buf, bool write)
{
struct nand_chip *nand;
size_t bytes_handled;
off_t offs_in_page;
u64 page;
int ret;
nand = mtd_to_nand(mtd);
nand_select_target(nand, 0);
ret = macronix_30lfxg18ac_otp_enable(nand);
if (ret)
goto out_otp;
page = offs_in_flash;
	/* do_div() leaves the quotient in 'page' and returns the remainder. */
offs_in_page = do_div(page, MACRONIX_30LFXG18AC_OTP_PAGE_SIZE);
bytes_handled = 0;
while (bytes_handled < len &&
page < MACRONIX_30LFXG18AC_OTP_PAGES) {
size_t bytes_to_handle;
u64 phys_page = page + MACRONIX_30LFXG18AC_OTP_START_PAGE;
bytes_to_handle = min_t(size_t, len - bytes_handled,
MACRONIX_30LFXG18AC_OTP_PAGE_SIZE -
offs_in_page);
if (write)
ret = nand_prog_page_op(nand, phys_page, offs_in_page,
&buf[bytes_handled], bytes_to_handle);
else
ret = nand_read_page_op(nand, phys_page, offs_in_page,
&buf[bytes_handled], bytes_to_handle);
if (ret)
goto out_otp;
bytes_handled += bytes_to_handle;
offs_in_page = 0;
page++;
}
*retlen = bytes_handled;
out_otp:
if (ret)
dev_err(&mtd->dev, "failed to perform OTP IO: %i\n", ret);
ret = macronix_30lfxg18ac_otp_disable(nand);
if (ret)
dev_err(&mtd->dev, "failed to leave OTP mode after %s\n",
write ? "write" : "read");
nand_deselect_target(nand);
return ret;
}
static int macronix_30lfxg18ac_write_otp(struct mtd_info *mtd, loff_t to,
size_t len, size_t *rlen,
const u_char *buf)
{
return __macronix_30lfxg18ac_rw_otp(mtd, to, len, rlen, (u_char *)buf,
true);
}
static int macronix_30lfxg18ac_read_otp(struct mtd_info *mtd, loff_t from,
size_t len, size_t *rlen,
u_char *buf)
{
return __macronix_30lfxg18ac_rw_otp(mtd, from, len, rlen, buf, false);
}
static int macronix_30lfxg18ac_lock_otp(struct mtd_info *mtd, loff_t from,
size_t len)
{
/* See comment in 'macronix_30lfxg18ac_get_otp_info()'. */
return -EOPNOTSUPP;
}
static void macronix_nand_setup_otp(struct nand_chip *chip)
{
static const char * const supported_otp_models[] = {
"MX30LF1G18AC",
"MX30LF2G18AC",
"MX30LF4G18AC",
};
struct mtd_info *mtd;
if (match_string(supported_otp_models,
ARRAY_SIZE(supported_otp_models),
chip->parameters.model) < 0)
return;
if (!chip->parameters.supports_set_get_features)
return;
bitmap_set(chip->parameters.get_feature_list,
ONFI_FEATURE_ADDR_30LFXG18AC_OTP, 1);
bitmap_set(chip->parameters.set_feature_list,
ONFI_FEATURE_ADDR_30LFXG18AC_OTP, 1);
mtd = nand_to_mtd(chip);
mtd->_get_user_prot_info = macronix_30lfxg18ac_get_otp_info;
mtd->_read_user_prot_reg = macronix_30lfxg18ac_read_otp;
mtd->_write_user_prot_reg = macronix_30lfxg18ac_write_otp;
mtd->_lock_user_prot_reg = macronix_30lfxg18ac_lock_otp;
}
static int macronix_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
macronix_nand_fix_broken_get_timings(chip);
macronix_nand_onfi_init(chip);
macronix_nand_block_protection_support(chip);
macronix_nand_deep_power_down_support(chip);
macronix_nand_setup_otp(chip);
return 0;
}
const struct nand_manufacturer_ops macronix_nand_manuf_ops = {
.init = macronix_nand_init,
};
| linux-master | drivers/mtd/nand/raw/nand_macronix.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NAND Flash Controller Device Driver for DT
*
* Copyright © 2011, Picochip.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include "denali.h"
struct denali_dt {
struct denali_controller controller;
struct clk *clk; /* core clock */
struct clk *clk_x; /* bus interface clock */
struct clk *clk_ecc; /* ECC circuit clock */
struct reset_control *rst; /* core reset */
struct reset_control *rst_reg; /* register reset */
};
struct denali_dt_data {
unsigned int revision;
unsigned int caps;
unsigned int oob_skip_bytes;
const struct nand_ecc_caps *ecc_caps;
};
NAND_ECC_CAPS_SINGLE(denali_socfpga_ecc_caps, denali_calc_ecc_bytes,
512, 8, 15);
static const struct denali_dt_data denali_socfpga_data = {
.caps = DENALI_CAP_HW_ECC_FIXUP,
.oob_skip_bytes = 2,
.ecc_caps = &denali_socfpga_ecc_caps,
};
NAND_ECC_CAPS_SINGLE(denali_uniphier_v5a_ecc_caps, denali_calc_ecc_bytes,
1024, 8, 16, 24);
static const struct denali_dt_data denali_uniphier_v5a_data = {
.caps = DENALI_CAP_HW_ECC_FIXUP |
DENALI_CAP_DMA_64BIT,
.oob_skip_bytes = 8,
.ecc_caps = &denali_uniphier_v5a_ecc_caps,
};
NAND_ECC_CAPS_SINGLE(denali_uniphier_v5b_ecc_caps, denali_calc_ecc_bytes,
1024, 8, 16);
static const struct denali_dt_data denali_uniphier_v5b_data = {
.revision = 0x0501,
.caps = DENALI_CAP_HW_ECC_FIXUP |
DENALI_CAP_DMA_64BIT,
.oob_skip_bytes = 8,
.ecc_caps = &denali_uniphier_v5b_ecc_caps,
};
static const struct of_device_id denali_nand_dt_ids[] = {
{
.compatible = "altr,socfpga-denali-nand",
.data = &denali_socfpga_data,
},
{
.compatible = "socionext,uniphier-denali-nand-v5a",
.data = &denali_uniphier_v5a_data,
},
{
.compatible = "socionext,uniphier-denali-nand-v5b",
.data = &denali_uniphier_v5b_data,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
static int denali_dt_chip_init(struct denali_controller *denali,
struct device_node *chip_np)
{
struct denali_chip *dchip;
u32 bank;
int nsels, i, ret;
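	/* The "reg" property lists one bank (chip select) per entry. */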
nsels = of_property_count_u32_elems(chip_np, "reg");
if (nsels < 0)
return nsels;
dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels),
GFP_KERNEL);
if (!dchip)
return -ENOMEM;
dchip->nsels = nsels;
for (i = 0; i < nsels; i++) {
ret = of_property_read_u32_index(chip_np, "reg", i, &bank);
if (ret)
return ret;
dchip->sels[i].bank = bank;
nand_set_flash_node(&dchip->chip, chip_np);
}
return denali_chip_init(denali, dchip);
}
static int denali_dt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct denali_dt *dt;
const struct denali_dt_data *data;
struct denali_controller *denali;
struct device_node *np;
int ret;
dt = devm_kzalloc(dev, sizeof(*dt), GFP_KERNEL);
if (!dt)
return -ENOMEM;
denali = &dt->controller;
data = of_device_get_match_data(dev);
if (WARN_ON(!data))
return -EINVAL;
denali->revision = data->revision;
denali->caps = data->caps;
denali->oob_skip_bytes = data->oob_skip_bytes;
denali->ecc_caps = data->ecc_caps;
denali->dev = dev;
denali->irq = platform_get_irq(pdev, 0);
if (denali->irq < 0)
return denali->irq;
denali->reg = devm_platform_ioremap_resource_byname(pdev, "denali_reg");
if (IS_ERR(denali->reg))
return PTR_ERR(denali->reg);
denali->host = devm_platform_ioremap_resource_byname(pdev, "nand_data");
if (IS_ERR(denali->host))
return PTR_ERR(denali->host);
dt->clk = devm_clk_get(dev, "nand");
if (IS_ERR(dt->clk))
return PTR_ERR(dt->clk);
dt->clk_x = devm_clk_get(dev, "nand_x");
if (IS_ERR(dt->clk_x))
return PTR_ERR(dt->clk_x);
dt->clk_ecc = devm_clk_get(dev, "ecc");
if (IS_ERR(dt->clk_ecc))
return PTR_ERR(dt->clk_ecc);
dt->rst = devm_reset_control_get_optional_shared(dev, "nand");
if (IS_ERR(dt->rst))
return PTR_ERR(dt->rst);
dt->rst_reg = devm_reset_control_get_optional_shared(dev, "reg");
if (IS_ERR(dt->rst_reg))
return PTR_ERR(dt->rst_reg);
ret = clk_prepare_enable(dt->clk);
if (ret)
return ret;
ret = clk_prepare_enable(dt->clk_x);
if (ret)
goto out_disable_clk;
ret = clk_prepare_enable(dt->clk_ecc);
if (ret)
goto out_disable_clk_x;
denali->clk_rate = clk_get_rate(dt->clk);
denali->clk_x_rate = clk_get_rate(dt->clk_x);
/*
* Deassert the register reset, and the core reset in this order.
* Deasserting the core reset while the register reset is asserted
* will cause unpredictable behavior in the controller.
*/
ret = reset_control_deassert(dt->rst_reg);
if (ret)
goto out_disable_clk_ecc;
ret = reset_control_deassert(dt->rst);
if (ret)
goto out_assert_rst_reg;
/*
* When the reset is deasserted, the initialization sequence is kicked
* (bootstrap process). The driver must wait until it finished.
* Otherwise, it will result in unpredictable behavior.
*/
usleep_range(200, 1000);
ret = denali_init(denali);
if (ret)
goto out_assert_rst;
for_each_child_of_node(dev->of_node, np) {
ret = denali_dt_chip_init(denali, np);
if (ret) {
of_node_put(np);
goto out_remove_denali;
}
}
platform_set_drvdata(pdev, dt);
return 0;
out_remove_denali:
denali_remove(denali);
out_assert_rst:
reset_control_assert(dt->rst);
out_assert_rst_reg:
reset_control_assert(dt->rst_reg);
out_disable_clk_ecc:
clk_disable_unprepare(dt->clk_ecc);
out_disable_clk_x:
clk_disable_unprepare(dt->clk_x);
out_disable_clk:
clk_disable_unprepare(dt->clk);
return ret;
}
static void denali_dt_remove(struct platform_device *pdev)
{
struct denali_dt *dt = platform_get_drvdata(pdev);
denali_remove(&dt->controller);
reset_control_assert(dt->rst);
reset_control_assert(dt->rst_reg);
clk_disable_unprepare(dt->clk_ecc);
clk_disable_unprepare(dt->clk_x);
clk_disable_unprepare(dt->clk);
}
static struct platform_driver denali_dt_driver = {
.probe = denali_dt_probe,
.remove_new = denali_dt_remove,
.driver = {
.name = "denali-nand-dt",
.of_match_table = denali_nand_dt_ids,
},
};
module_platform_driver(denali_dt_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jamie Iles");
MODULE_DESCRIPTION("DT driver for Denali NAND controller");
| linux-master | drivers/mtd/nand/raw/denali_dt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ARM PL35X NAND flash controller driver
*
* Copyright (C) 2017 Xilinx, Inc
* Author:
* Miquel Raynal <[email protected]>
* Original work (rewritten):
* Punnaiah Choudary Kalluri <[email protected]>
* Naga Sureshkumar Relli <[email protected]>
*/
#include <linux/amba/bus.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#define PL35X_NANDC_DRIVER_NAME "pl35x-nand-controller"
/* SMC controller status register (RO) */
#define PL35X_SMC_MEMC_STATUS 0x0
#define PL35X_SMC_MEMC_STATUS_RAW_INT_STATUS1 BIT(6)
/* SMC clear config register (WO) */
#define PL35X_SMC_MEMC_CFG_CLR 0xC
#define PL35X_SMC_MEMC_CFG_CLR_INT_DIS_1 BIT(1)
#define PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1 BIT(4)
#define PL35X_SMC_MEMC_CFG_CLR_ECC_INT_DIS_1 BIT(6)
/* SMC direct command register (WO) */
#define PL35X_SMC_DIRECT_CMD 0x10
#define PL35X_SMC_DIRECT_CMD_NAND_CS (0x4 << 23)
#define PL35X_SMC_DIRECT_CMD_UPD_REGS (0x2 << 21)
/* SMC set cycles register (WO) */
#define PL35X_SMC_CYCLES 0x14
#define PL35X_SMC_NAND_TRC_CYCLES(x) ((x) << 0)
#define PL35X_SMC_NAND_TWC_CYCLES(x) ((x) << 4)
#define PL35X_SMC_NAND_TREA_CYCLES(x) ((x) << 8)
#define PL35X_SMC_NAND_TWP_CYCLES(x) ((x) << 11)
#define PL35X_SMC_NAND_TCLR_CYCLES(x) ((x) << 14)
#define PL35X_SMC_NAND_TAR_CYCLES(x) ((x) << 17)
#define PL35X_SMC_NAND_TRR_CYCLES(x) ((x) << 20)
/* SMC set opmode register (WO) */
#define PL35X_SMC_OPMODE 0x18
#define PL35X_SMC_OPMODE_BW_8 0
#define PL35X_SMC_OPMODE_BW_16 1
/* SMC ECC status register (RO) */
#define PL35X_SMC_ECC_STATUS 0x400
#define PL35X_SMC_ECC_STATUS_ECC_BUSY BIT(6)
/* SMC ECC configuration register */
#define PL35X_SMC_ECC_CFG 0x404
#define PL35X_SMC_ECC_CFG_MODE_MASK 0xC
#define PL35X_SMC_ECC_CFG_MODE_BYPASS 0
#define PL35X_SMC_ECC_CFG_MODE_APB BIT(2)
#define PL35X_SMC_ECC_CFG_MODE_MEM BIT(3)
#define PL35X_SMC_ECC_CFG_PGSIZE_MASK 0x3
/* SMC ECC command 1 register */
#define PL35X_SMC_ECC_CMD1 0x408
#define PL35X_SMC_ECC_CMD1_WRITE(x) ((x) << 0)
#define PL35X_SMC_ECC_CMD1_READ(x) ((x) << 8)
#define PL35X_SMC_ECC_CMD1_READ_END(x) ((x) << 16)
#define PL35X_SMC_ECC_CMD1_READ_END_VALID(x) ((x) << 24)
/* SMC ECC command 2 register */
#define PL35X_SMC_ECC_CMD2 0x40C
#define PL35X_SMC_ECC_CMD2_WRITE_COL_CHG(x) ((x) << 0)
#define PL35X_SMC_ECC_CMD2_READ_COL_CHG(x) ((x) << 8)
#define PL35X_SMC_ECC_CMD2_READ_COL_CHG_END(x) ((x) << 16)
#define PL35X_SMC_ECC_CMD2_READ_COL_CHG_END_VALID(x) ((x) << 24)
/* SMC ECC value registers (RO) */
#define PL35X_SMC_ECC_VALUE(x) (0x418 + (4 * (x)))
#define PL35X_SMC_ECC_VALUE_IS_CORRECTABLE(x) ((x) & BIT(27))
#define PL35X_SMC_ECC_VALUE_HAS_FAILED(x) ((x) & BIT(28))
#define PL35X_SMC_ECC_VALUE_IS_VALID(x) ((x) & BIT(30))
/* NAND AXI interface */
#define PL35X_SMC_CMD_PHASE 0
#define PL35X_SMC_CMD_PHASE_CMD0(x) ((x) << 3)
#define PL35X_SMC_CMD_PHASE_CMD1(x) ((x) << 11)
#define PL35X_SMC_CMD_PHASE_CMD1_VALID BIT(20)
#define PL35X_SMC_CMD_PHASE_ADDR(pos, x) ((x) << (8 * (pos)))
#define PL35X_SMC_CMD_PHASE_NADDRS(x) ((x) << 21)
#define PL35X_SMC_DATA_PHASE BIT(19)
#define PL35X_SMC_DATA_PHASE_ECC_LAST BIT(10)
#define PL35X_SMC_DATA_PHASE_CLEAR_CS BIT(21)
#define PL35X_NAND_MAX_CS 1
#define PL35X_NAND_LAST_XFER_SZ 4
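/* Convert a duration in picoseconds into a number of cycles of the given period (ns) */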
#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP((ps) / 1000, period_ns))
#define PL35X_NAND_ECC_BITS_MASK 0xFFF
#define PL35X_NAND_ECC_BYTE_OFF_MASK 0x1FF
#define PL35X_NAND_ECC_BIT_OFF_MASK 0x7
struct pl35x_nand_timings {
unsigned int t_rc:4;
unsigned int t_wc:4;
unsigned int t_rea:3;
unsigned int t_wp:3;
unsigned int t_clr:3;
unsigned int t_ar:3;
unsigned int t_rr:4;
unsigned int rsvd:8;
};
struct pl35x_nand {
struct list_head node;
struct nand_chip chip;
unsigned int cs;
unsigned int addr_cycles;
u32 ecc_cfg;
u32 timings;
};
/**
* struct pl35x_nandc - NAND flash controller driver structure
* @dev: Kernel device
* @conf_regs: SMC configuration registers for command phase
* @io_regs: NAND data registers for data phase
* @controller: Core NAND controller structure
 * @chips: List of connected NAND chips
* @selected_chip: NAND chip currently selected by the controller
* @assigned_cs: List of assigned CS
* @ecc_buf: Temporary buffer to extract ECC bytes
*/
struct pl35x_nandc {
struct device *dev;
void __iomem *conf_regs;
void __iomem *io_regs;
struct nand_controller controller;
struct list_head chips;
struct nand_chip *selected_chip;
unsigned long assigned_cs;
u8 *ecc_buf;
};
static inline struct pl35x_nandc *to_pl35x_nandc(struct nand_controller *ctrl)
{
return container_of(ctrl, struct pl35x_nandc, controller);
}
static inline struct pl35x_nand *to_pl35x_nand(struct nand_chip *chip)
{
return container_of(chip, struct pl35x_nand, chip);
}
static int pl35x_ecc_ooblayout16_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (section >= chip->ecc.steps)
return -ERANGE;
oobregion->offset = (section * chip->ecc.bytes);
oobregion->length = chip->ecc.bytes;
return 0;
}
static int pl35x_ecc_ooblayout16_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (section >= chip->ecc.steps)
return -ERANGE;
oobregion->offset = (section * chip->ecc.bytes) + 8;
oobregion->length = 8;
return 0;
}
static const struct mtd_ooblayout_ops pl35x_ecc_ooblayout16_ops = {
.ecc = pl35x_ecc_ooblayout16_ecc,
.free = pl35x_ecc_ooblayout16_free,
};
/* Generic flash BBT descriptors */
static u8 bbt_pattern[] = { 'B', 'b', 't', '0' };
static u8 mirror_pattern[] = { '1', 't', 'b', 'B' };
static struct nand_bbt_descr bbt_main_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
.offs = 4,
.len = 4,
.veroffs = 20,
.maxblocks = 4,
.pattern = bbt_pattern
};
static struct nand_bbt_descr bbt_mirror_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
.offs = 4,
.len = 4,
.veroffs = 20,
.maxblocks = 4,
.pattern = mirror_pattern
};
static void pl35x_smc_update_regs(struct pl35x_nandc *nfc)
{
writel(PL35X_SMC_DIRECT_CMD_NAND_CS |
PL35X_SMC_DIRECT_CMD_UPD_REGS,
nfc->conf_regs + PL35X_SMC_DIRECT_CMD);
}
static int pl35x_smc_set_buswidth(struct pl35x_nandc *nfc, unsigned int bw)
{
if (bw != PL35X_SMC_OPMODE_BW_8 && bw != PL35X_SMC_OPMODE_BW_16)
return -EINVAL;
writel(bw, nfc->conf_regs + PL35X_SMC_OPMODE);
pl35x_smc_update_regs(nfc);
return 0;
}
static void pl35x_smc_clear_irq(struct pl35x_nandc *nfc)
{
writel(PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1,
nfc->conf_regs + PL35X_SMC_MEMC_CFG_CLR);
}
static int pl35x_smc_wait_for_irq(struct pl35x_nandc *nfc)
{
u32 reg;
int ret;
ret = readl_poll_timeout(nfc->conf_regs + PL35X_SMC_MEMC_STATUS, reg,
reg & PL35X_SMC_MEMC_STATUS_RAW_INT_STATUS1,
10, 1000000);
if (ret)
dev_err(nfc->dev,
"Timeout polling on NAND controller interrupt (0x%x)\n",
reg);
pl35x_smc_clear_irq(nfc);
return ret;
}
static int pl35x_smc_wait_for_ecc_done(struct pl35x_nandc *nfc)
{
u32 reg;
int ret;
ret = readl_poll_timeout(nfc->conf_regs + PL35X_SMC_ECC_STATUS, reg,
!(reg & PL35X_SMC_ECC_STATUS_ECC_BUSY),
10, 1000000);
if (ret)
dev_err(nfc->dev,
"Timeout polling on ECC controller interrupt\n");
return ret;
}
static int pl35x_smc_set_ecc_mode(struct pl35x_nandc *nfc,
struct nand_chip *chip,
unsigned int mode)
{
struct pl35x_nand *plnand;
u32 ecc_cfg;
ecc_cfg = readl(nfc->conf_regs + PL35X_SMC_ECC_CFG);
ecc_cfg &= ~PL35X_SMC_ECC_CFG_MODE_MASK;
ecc_cfg |= mode;
writel(ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);
if (chip) {
plnand = to_pl35x_nand(chip);
plnand->ecc_cfg = ecc_cfg;
}
if (mode != PL35X_SMC_ECC_CFG_MODE_BYPASS)
return pl35x_smc_wait_for_ecc_done(nfc);
return 0;
}
static void pl35x_smc_force_byte_access(struct nand_chip *chip,
bool force_8bit)
{
struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
int ret;
if (!(chip->options & NAND_BUSWIDTH_16))
return;
if (force_8bit)
ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_8);
else
ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_16);
if (ret)
dev_err(nfc->dev, "Error in Buswidth\n");
}
static void pl35x_nand_select_target(struct nand_chip *chip,
unsigned int die_nr)
{
struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
struct pl35x_nand *plnand = to_pl35x_nand(chip);
if (chip == nfc->selected_chip)
return;
/* Setup the timings */
writel(plnand->timings, nfc->conf_regs + PL35X_SMC_CYCLES);
pl35x_smc_update_regs(nfc);
/* Configure the ECC engine */
writel(plnand->ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);
nfc->selected_chip = chip;
}
static void pl35x_nand_read_data_op(struct nand_chip *chip, u8 *in,
unsigned int len, bool force_8bit,
unsigned int flags, unsigned int last_flags)
{
struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
unsigned int buf_end = len / 4;
unsigned int in_start = round_down(len, 4);
unsigned int data_phase_addr;
u32 *buf32 = (u32 *)in;
u8 *buf8 = (u8 *)in;
int i;
if (force_8bit)
pl35x_smc_force_byte_access(chip, true);
for (i = 0; i < buf_end; i++) {
data_phase_addr = PL35X_SMC_DATA_PHASE + flags;
if (i + 1 == buf_end)
data_phase_addr = PL35X_SMC_DATA_PHASE + last_flags;
buf32[i] = readl(nfc->io_regs + data_phase_addr);
}
/* No working extra flags on unaligned data accesses */
for (i = in_start; i < len; i++)
buf8[i] = readb(nfc->io_regs + PL35X_SMC_DATA_PHASE);
if (force_8bit)
pl35x_smc_force_byte_access(chip, false);
}
static void pl35x_nand_write_data_op(struct nand_chip *chip, const u8 *out,
int len, bool force_8bit,
unsigned int flags,
unsigned int last_flags)
{
struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
unsigned int buf_end = len / 4;
unsigned int in_start = round_down(len, 4);
const u32 *buf32 = (const u32 *)out;
const u8 *buf8 = (const u8 *)out;
unsigned int data_phase_addr;
int i;
if (force_8bit)
pl35x_smc_force_byte_access(chip, true);
for (i = 0; i < buf_end; i++) {
data_phase_addr = PL35X_SMC_DATA_PHASE + flags;
if (i + 1 == buf_end)
data_phase_addr = PL35X_SMC_DATA_PHASE + last_flags;
writel(buf32[i], nfc->io_regs + data_phase_addr);
}
/* No working extra flags on unaligned data accesses */
for (i = in_start; i < len; i++)
writeb(buf8[i], nfc->io_regs + PL35X_SMC_DATA_PHASE);
if (force_8bit)
pl35x_smc_force_byte_access(chip, false);
}
static int pl35x_nand_correct_data(struct pl35x_nandc *nfc, unsigned char *buf,
unsigned char *read_ecc,
unsigned char *calc_ecc)
{
unsigned short ecc_odd, ecc_even, read_ecc_lower, read_ecc_upper;
unsigned short calc_ecc_lower, calc_ecc_upper;
unsigned short byte_addr, bit_addr;
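	/*
	 * The three ECC bytes hold two 12-bit parity words. XOR-ing the
	 * stored and freshly calculated words tells whether an error, if
	 * any, lies in the data (a single correctable bit whose position is
	 * encoded in the syndrome) or in the ECC bytes themselves.
	 */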
read_ecc_lower = (read_ecc[0] | (read_ecc[1] << 8)) &
PL35X_NAND_ECC_BITS_MASK;
read_ecc_upper = ((read_ecc[1] >> 4) | (read_ecc[2] << 4)) &
PL35X_NAND_ECC_BITS_MASK;
calc_ecc_lower = (calc_ecc[0] | (calc_ecc[1] << 8)) &
PL35X_NAND_ECC_BITS_MASK;
calc_ecc_upper = ((calc_ecc[1] >> 4) | (calc_ecc[2] << 4)) &
PL35X_NAND_ECC_BITS_MASK;
ecc_odd = read_ecc_lower ^ calc_ecc_lower;
ecc_even = read_ecc_upper ^ calc_ecc_upper;
/* No error */
if (likely(!ecc_odd && !ecc_even))
return 0;
/* One error in the main data; to be corrected */
if (ecc_odd == (~ecc_even & PL35X_NAND_ECC_BITS_MASK)) {
/* Bits [11:3] of error code give the byte offset */
byte_addr = (ecc_odd >> 3) & PL35X_NAND_ECC_BYTE_OFF_MASK;
/* Bits [2:0] of error code give the bit offset */
bit_addr = ecc_odd & PL35X_NAND_ECC_BIT_OFF_MASK;
/* Toggle the faulty bit */
buf[byte_addr] ^= (BIT(bit_addr));
return 1;
}
/* One error in the ECC data; no action needed */
if (hweight32(ecc_odd | ecc_even) == 1)
return 1;
return -EBADMSG;
}
static void pl35x_nand_ecc_reg_to_array(struct nand_chip *chip, u32 ecc_reg,
u8 *ecc_array)
{
u32 ecc_value = ~ecc_reg;
unsigned int ecc_byte;
for (ecc_byte = 0; ecc_byte < chip->ecc.bytes; ecc_byte++)
ecc_array[ecc_byte] = ecc_value >> (8 * ecc_byte);
}
static int pl35x_nand_read_eccbytes(struct pl35x_nandc *nfc,
struct nand_chip *chip, u8 *read_ecc)
{
u32 ecc_value;
int chunk;
for (chunk = 0; chunk < chip->ecc.steps;
chunk++, read_ecc += chip->ecc.bytes) {
ecc_value = readl(nfc->conf_regs + PL35X_SMC_ECC_VALUE(chunk));
if (!PL35X_SMC_ECC_VALUE_IS_VALID(ecc_value))
return -EINVAL;
pl35x_nand_ecc_reg_to_array(chip, ecc_value, read_ecc);
}
return 0;
}
static int pl35x_nand_recover_data_hwecc(struct pl35x_nandc *nfc,
struct nand_chip *chip, u8 *data,
u8 *read_ecc)
{
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int max_bitflips = 0, chunk;
u8 calc_ecc[3];
u32 ecc_value;
int stats;
for (chunk = 0; chunk < chip->ecc.steps;
chunk++, data += chip->ecc.size, read_ecc += chip->ecc.bytes) {
/* Read ECC value for each chunk */
ecc_value = readl(nfc->conf_regs + PL35X_SMC_ECC_VALUE(chunk));
if (!PL35X_SMC_ECC_VALUE_IS_VALID(ecc_value))
return -EINVAL;
if (PL35X_SMC_ECC_VALUE_HAS_FAILED(ecc_value)) {
mtd->ecc_stats.failed++;
continue;
}
pl35x_nand_ecc_reg_to_array(chip, ecc_value, calc_ecc);
stats = pl35x_nand_correct_data(nfc, data, read_ecc, calc_ecc);
if (stats < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += stats;
max_bitflips = max_t(unsigned int, max_bitflips, stats);
}
}
return max_bitflips;
}
static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
const u8 *buf, int oob_required,
int page)
{
struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
struct pl35x_nand *plnand = to_pl35x_nand(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int first_row = (mtd->writesize <= 512) ? 1 : 2;
unsigned int nrows = plnand->addr_cycles;
u32 addr1 = 0, addr2 = 0, row;
u32 cmd_addr;
int i, ret;
ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
if (ret)
return ret;
cmd_addr = PL35X_SMC_CMD_PHASE |
PL35X_SMC_CMD_PHASE_NADDRS(plnand->addr_cycles) |
PL35X_SMC_CMD_PHASE_CMD0(NAND_CMD_SEQIN);
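	/* Row address cycles follow 1 (small page) or 2 (large page) column cycles */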
for (i = 0, row = first_row; row < nrows; i++, row++) {
u8 addr = page >> ((i * 8) & 0xFF);
if (row < 4)
addr1 |= PL35X_SMC_CMD_PHASE_ADDR(row, addr);
else
addr2 |= PL35X_SMC_CMD_PHASE_ADDR(row - 4, addr);
}
/* Send the command and address cycles */
writel(addr1, nfc->io_regs + cmd_addr);
if (plnand->addr_cycles > 4)
writel(addr2, nfc->io_regs + cmd_addr);
/* Write the data with the engine enabled */
pl35x_nand_write_data_op(chip, buf, mtd->writesize, false,
0, PL35X_SMC_DATA_PHASE_ECC_LAST);
ret = pl35x_smc_wait_for_ecc_done(nfc);
if (ret)
goto disable_ecc_engine;
/* Copy the HW calculated ECC bytes in the OOB buffer */
ret = pl35x_nand_read_eccbytes(nfc, chip, nfc->ecc_buf);
if (ret)
goto disable_ecc_engine;
if (!oob_required)
memset(chip->oob_poi, 0xFF, mtd->oobsize);
ret = mtd_ooblayout_set_eccbytes(mtd, nfc->ecc_buf, chip->oob_poi,
0, chip->ecc.total);
if (ret)
goto disable_ecc_engine;
/* Write the spare area with ECC bytes */
pl35x_nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false, 0,
PL35X_SMC_CMD_PHASE_CMD1(NAND_CMD_PAGEPROG) |
PL35X_SMC_CMD_PHASE_CMD1_VALID |
PL35X_SMC_DATA_PHASE_CLEAR_CS);
ret = pl35x_smc_wait_for_irq(nfc);
if (ret)
goto disable_ecc_engine;
disable_ecc_engine:
pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
return ret;
}
/*
 * This function reads data and checks data integrity by comparing the
 * hardware-generated ECC values with the ECC values read from the spare area.
 *
 * There is a limitation with the SMC controller: ECC_LAST must be set on the
 * last data access to tell the ECC engine not to expect any further data.
 * In practice, this implies shrinking the last data transfer by e.g. 4 bytes,
 * and doing a final 4-byte transfer with the additional bit set. The last
 * block should be aligned with the end of an ECC block. Because of this
 * limitation, it is not possible to use the core routines.
*/
static int pl35x_nand_read_page_hwecc(struct nand_chip *chip,
u8 *buf, int oob_required, int page)
{
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(nand_get_interface_config(chip));
struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
struct pl35x_nand *plnand = to_pl35x_nand(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int first_row = (mtd->writesize <= 512) ? 1 : 2;
unsigned int nrows = plnand->addr_cycles;
unsigned int addr1 = 0, addr2 = 0, row;
u32 cmd_addr;
int i, ret;
ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
if (ret)
return ret;
cmd_addr = PL35X_SMC_CMD_PHASE |
PL35X_SMC_CMD_PHASE_NADDRS(plnand->addr_cycles) |
PL35X_SMC_CMD_PHASE_CMD0(NAND_CMD_READ0) |
PL35X_SMC_CMD_PHASE_CMD1(NAND_CMD_READSTART) |
PL35X_SMC_CMD_PHASE_CMD1_VALID;
for (i = 0, row = first_row; row < nrows; i++, row++) {
u8 addr = page >> ((i * 8) & 0xFF);
if (row < 4)
addr1 |= PL35X_SMC_CMD_PHASE_ADDR(row, addr);
else
addr2 |= PL35X_SMC_CMD_PHASE_ADDR(row - 4, addr);
}
/* Send the command and address cycles */
writel(addr1, nfc->io_regs + cmd_addr);
if (plnand->addr_cycles > 4)
writel(addr2, nfc->io_regs + cmd_addr);
/* Wait the data to be available in the NAND cache */
ndelay(PSEC_TO_NSEC(sdr->tRR_min));
ret = pl35x_smc_wait_for_irq(nfc);
if (ret)
goto disable_ecc_engine;
/* Retrieve the raw data with the engine enabled */
pl35x_nand_read_data_op(chip, buf, mtd->writesize, false,
0, PL35X_SMC_DATA_PHASE_ECC_LAST);
ret = pl35x_smc_wait_for_ecc_done(nfc);
if (ret)
goto disable_ecc_engine;
/* Retrieve the stored ECC bytes */
pl35x_nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
0, PL35X_SMC_DATA_PHASE_CLEAR_CS);
ret = mtd_ooblayout_get_eccbytes(mtd, nfc->ecc_buf, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
goto disable_ecc_engine;
pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
/* Correct the data and report failures */
return pl35x_nand_recover_data_hwecc(nfc, chip, buf, nfc->ecc_buf);
disable_ecc_engine:
pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
return ret;
}
static int pl35x_nand_exec_op(struct nand_chip *chip,
const struct nand_subop *subop)
{
struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
const struct nand_op_instr *instr, *data_instr = NULL;
unsigned int rdy_tim_ms = 0, naddrs = 0, cmds = 0, last_flags = 0;
u32 addr1 = 0, addr2 = 0, cmd0 = 0, cmd1 = 0, cmd_addr = 0;
unsigned int op_id, len, offset, rdy_del_ns;
int last_instr_type = -1;
bool cmd1_valid = false;
const u8 *addrs;
int i, ret;
for (op_id = 0; op_id < subop->ninstrs; op_id++) {
instr = &subop->instrs[op_id];
switch (instr->type) {
case NAND_OP_CMD_INSTR:
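/*
 * The first opcode of the sequence goes into CMD0. A second opcode is
 * stored in CMD1 but is only flagged valid for the command phase when it
 * does not follow a data-out instruction; otherwise it is sent at the
 * end of the data phase (see the data phase handling below).
 */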
if (!cmds) {
cmd0 = PL35X_SMC_CMD_PHASE_CMD0(instr->ctx.cmd.opcode);
} else {
cmd1 = PL35X_SMC_CMD_PHASE_CMD1(instr->ctx.cmd.opcode);
if (last_instr_type != NAND_OP_DATA_OUT_INSTR)
cmd1_valid = true;
}
cmds++;
break;
case NAND_OP_ADDR_INSTR:
offset = nand_subop_get_addr_start_off(subop, op_id);
naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
addrs = &instr->ctx.addr.addrs[offset];
cmd_addr |= PL35X_SMC_CMD_PHASE_NADDRS(naddrs);
for (i = offset; i < naddrs; i++) {
if (i < 4)
addr1 |= PL35X_SMC_CMD_PHASE_ADDR(i, addrs[i]);
else
addr2 |= PL35X_SMC_CMD_PHASE_ADDR(i - 4, addrs[i]);
}
break;
case NAND_OP_DATA_IN_INSTR:
case NAND_OP_DATA_OUT_INSTR:
data_instr = instr;
len = nand_subop_get_data_len(subop, op_id);
break;
case NAND_OP_WAITRDY_INSTR:
rdy_tim_ms = instr->ctx.waitrdy.timeout_ms;
rdy_del_ns = instr->delay_ns;
break;
}
last_instr_type = instr->type;
}
/* Command phase */
cmd_addr |= PL35X_SMC_CMD_PHASE | cmd0 | cmd1 |
(cmd1_valid ? PL35X_SMC_CMD_PHASE_CMD1_VALID : 0);
writel(addr1, nfc->io_regs + cmd_addr);
if (naddrs > 4)
writel(addr2, nfc->io_regs + cmd_addr);
/* Data phase */
if (data_instr && data_instr->type == NAND_OP_DATA_OUT_INSTR) {
last_flags = PL35X_SMC_DATA_PHASE_CLEAR_CS;
if (cmds == 2)
last_flags |= cmd1 | PL35X_SMC_CMD_PHASE_CMD1_VALID;
pl35x_nand_write_data_op(chip, data_instr->ctx.data.buf.out,
len, data_instr->ctx.data.force_8bit,
0, last_flags);
}
if (rdy_tim_ms) {
ndelay(rdy_del_ns);
ret = pl35x_smc_wait_for_irq(nfc);
if (ret)
return ret;
}
if (data_instr && data_instr->type == NAND_OP_DATA_IN_INSTR)
pl35x_nand_read_data_op(chip, data_instr->ctx.data.buf.in,
len, data_instr->ctx.data.force_8bit,
0, PL35X_SMC_DATA_PHASE_CLEAR_CS);
return 0;
}
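/*
 * Supported operation patterns: a read-like sequence (command, addresses,
 * optional second command, wait, data in, all elements optional) and two
 * program-like sequences where the data-out phase is followed by a confirm
 * command (mandatory in one pattern, optional in the other) and an optional
 * wait for the ready line.
 */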
static const struct nand_op_parser pl35x_nandc_op_parser = NAND_OP_PARSER(
NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7),
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 2112)),
NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2112),
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2112),
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
);
static int pl35x_nfc_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
if (!check_only)
pl35x_nand_select_target(chip, op->cs);
return nand_op_parser_exec_op(chip, &pl35x_nandc_op_parser,
op, check_only);
}
static int pl35x_nfc_setup_interface(struct nand_chip *chip, int cs,
const struct nand_interface_config *conf)
{
struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
struct pl35x_nand *plnand = to_pl35x_nand(chip);
struct pl35x_nand_timings tmgs = {};
const struct nand_sdr_timings *sdr;
unsigned int period_ns, val;
struct clk *mclk;
sdr = nand_get_sdr_timings(conf);
if (IS_ERR(sdr))
return PTR_ERR(sdr);
mclk = of_clk_get_by_name(nfc->dev->parent->of_node, "memclk");
if (IS_ERR(mclk)) {
dev_err(nfc->dev, "Failed to retrieve SMC memclk\n");
return PTR_ERR(mclk);
}
/*
* SDR timings are given in pico-seconds while NFC timings must be
* expressed in NAND controller clock cycles. We use the TO_CYCLES()
* macro to convert from one to the other.
*/
period_ns = NSEC_PER_SEC / clk_get_rate(mclk);
/*
* PL35X SMC needs one extra read cycle in SDR Mode 5. This is not
* written anywhere in the datasheet but is an empirical observation.
*/
val = TO_CYCLES(sdr->tRC_min, period_ns);
if (sdr->tRC_min <= 20000)
val++;
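/*
 * The timing fields below are presumably narrow bitfields: a value that
 * does not read back unchanged after assignment would overflow the
 * corresponding register field, hence the "!= val" checks.
 */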
tmgs.t_rc = val;
if (tmgs.t_rc != val || tmgs.t_rc < 2)
return -EINVAL;
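/*
 * Illustrative example (hypothetical values): with a 100 MHz memclk,
 * period_ns is 10, so a 25 ns (25000 ps) tWC_min needs three controller
 * cycles, assuming TO_CYCLES() rounds up to whole cycles.
 */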
val = TO_CYCLES(sdr->tWC_min, period_ns);
tmgs.t_wc = val;
if (tmgs.t_wc != val || tmgs.t_wc < 2)
return -EINVAL;
/*
* For all SDR modes, the PL35X SMC needs tREA_max to be set to 1;
* this is also an empirical result.
*/
tmgs.t_rea = 1;
val = TO_CYCLES(sdr->tWP_min, period_ns);
tmgs.t_wp = val;
if (tmgs.t_wp != val || tmgs.t_wp < 1)
return -EINVAL;
val = TO_CYCLES(sdr->tCLR_min, period_ns);
tmgs.t_clr = val;
if (tmgs.t_clr != val)
return -EINVAL;
val = TO_CYCLES(sdr->tAR_min, period_ns);
tmgs.t_ar = val;
if (tmgs.t_ar != val)
return -EINVAL;
val = TO_CYCLES(sdr->tRR_min, period_ns);
tmgs.t_rr = val;
if (tmgs.t_rr != val)
return -EINVAL;
if (cs == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
plnand->timings = PL35X_SMC_NAND_TRC_CYCLES(tmgs.t_rc) |
PL35X_SMC_NAND_TWC_CYCLES(tmgs.t_wc) |
PL35X_SMC_NAND_TREA_CYCLES(tmgs.t_rea) |
PL35X_SMC_NAND_TWP_CYCLES(tmgs.t_wp) |
PL35X_SMC_NAND_TCLR_CYCLES(tmgs.t_clr) |
PL35X_SMC_NAND_TAR_CYCLES(tmgs.t_ar) |
PL35X_SMC_NAND_TRR_CYCLES(tmgs.t_rr);
return 0;
}
static void pl35x_smc_set_ecc_pg_size(struct pl35x_nandc *nfc,
struct nand_chip *chip,
unsigned int pg_sz)
{
struct pl35x_nand *plnand = to_pl35x_nand(chip);
u32 sz;
switch (pg_sz) {
case SZ_512:
sz = 1;
break;
case SZ_1K:
sz = 2;
break;
case SZ_2K:
sz = 3;
break;
default:
sz = 0;
break;
}
plnand->ecc_cfg = readl(nfc->conf_regs + PL35X_SMC_ECC_CFG);
plnand->ecc_cfg &= ~PL35X_SMC_ECC_CFG_PGSIZE_MASK;
plnand->ecc_cfg |= sz;
writel(plnand->ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);
}
static int pl35x_nand_init_hw_ecc_controller(struct pl35x_nandc *nfc,
struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret = 0;
if (mtd->writesize < SZ_512 || mtd->writesize > SZ_2K) {
dev_err(nfc->dev,
"The hardware ECC engine is limited to pages up to 2kiB\n");
return -EOPNOTSUPP;
}
chip->ecc.strength = 1;
chip->ecc.bytes = 3;
chip->ecc.size = SZ_512;
chip->ecc.steps = mtd->writesize / chip->ecc.size;
chip->ecc.read_page = pl35x_nand_read_page_hwecc;
chip->ecc.write_page = pl35x_nand_write_page_hwecc;
chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
pl35x_smc_set_ecc_pg_size(nfc, chip, mtd->writesize);
nfc->ecc_buf = devm_kmalloc(nfc->dev, chip->ecc.bytes * chip->ecc.steps,
GFP_KERNEL);
if (!nfc->ecc_buf)
return -ENOMEM;
switch (mtd->oobsize) {
case 16:
/* Legacy Xilinx layout */
mtd_set_ooblayout(mtd, &pl35x_ecc_ooblayout16_ops);
chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
break;
case 64:
mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
break;
default:
dev_err(nfc->dev, "Unsupported OOB size\n");
return -EOPNOTSUPP;
}
return ret;
}
static int pl35x_nand_attach_chip(struct nand_chip *chip)
{
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(&chip->base);
struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
struct pl35x_nand *plnand = to_pl35x_nand(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
(!chip->ecc.size || !chip->ecc.strength)) {
if (requirements->step_size && requirements->strength) {
chip->ecc.size = requirements->step_size;
chip->ecc.strength = requirements->strength;
} else {
dev_info(nfc->dev,
"No minimum ECC strength, using 1b/512B\n");
chip->ecc.size = 512;
chip->ecc.strength = 1;
}
}
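/*
 * Column address cycles: one for small-page devices (<= 512 bytes), two
 * otherwise. Row address cycles: three when the chip needs a third row
 * address, two otherwise.
 */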
if (mtd->writesize <= SZ_512)
plnand->addr_cycles = 1;
else
plnand->addr_cycles = 2;
if (chip->options & NAND_ROW_ADDR_3)
plnand->addr_cycles += 3;
else
plnand->addr_cycles += 2;
switch (chip->ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_ON_DIE:
/* Keep these legacy BBT descriptors for ON_DIE situations */
chip->bbt_td = &bbt_main_descr;
chip->bbt_md = &bbt_mirror_descr;
fallthrough;
case NAND_ECC_ENGINE_TYPE_NONE:
case NAND_ECC_ENGINE_TYPE_SOFT:
break;
case NAND_ECC_ENGINE_TYPE_ON_HOST:
ret = pl35x_nand_init_hw_ecc_controller(nfc, chip);
if (ret)
return ret;
break;
default:
dev_err(nfc->dev, "Unsupported ECC mode: %d\n",
chip->ecc.engine_type);
return -EINVAL;
}
return 0;
}
static const struct nand_controller_ops pl35x_nandc_ops = {
.attach_chip = pl35x_nand_attach_chip,
.exec_op = pl35x_nfc_exec_op,
.setup_interface = pl35x_nfc_setup_interface,
};
static int pl35x_nand_reset_state(struct pl35x_nandc *nfc)
{
int ret;
/* Disable interrupts and clear their status */
writel(PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1 |
PL35X_SMC_MEMC_CFG_CLR_ECC_INT_DIS_1 |
PL35X_SMC_MEMC_CFG_CLR_INT_DIS_1,
nfc->conf_regs + PL35X_SMC_MEMC_CFG_CLR);
/* Set default bus width to 8-bit */
ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_8);
if (ret)
return ret;
/* Ensure the ECC controller is bypassed by default */
ret = pl35x_smc_set_ecc_mode(nfc, NULL, PL35X_SMC_ECC_CFG_MODE_BYPASS);
if (ret)
return ret;
/*
* Configure the commands that the ECC block uses to detect the
* operations it should start/end.
*/
writel(PL35X_SMC_ECC_CMD1_WRITE(NAND_CMD_SEQIN) |
PL35X_SMC_ECC_CMD1_READ(NAND_CMD_READ0) |
PL35X_SMC_ECC_CMD1_READ_END(NAND_CMD_READSTART) |
PL35X_SMC_ECC_CMD1_READ_END_VALID(NAND_CMD_READ1),
nfc->conf_regs + PL35X_SMC_ECC_CMD1);
writel(PL35X_SMC_ECC_CMD2_WRITE_COL_CHG(NAND_CMD_RNDIN) |
PL35X_SMC_ECC_CMD2_READ_COL_CHG(NAND_CMD_RNDOUT) |
PL35X_SMC_ECC_CMD2_READ_COL_CHG_END(NAND_CMD_RNDOUTSTART) |
PL35X_SMC_ECC_CMD2_READ_COL_CHG_END_VALID(NAND_CMD_READ1),
nfc->conf_regs + PL35X_SMC_ECC_CMD2);
return 0;
}
static int pl35x_nand_chip_init(struct pl35x_nandc *nfc,
struct device_node *np)
{
struct pl35x_nand *plnand;
struct nand_chip *chip;
struct mtd_info *mtd;
int cs, ret;
plnand = devm_kzalloc(nfc->dev, sizeof(*plnand), GFP_KERNEL);
if (!plnand)
return -ENOMEM;
ret = of_property_read_u32(np, "reg", &cs);
if (ret)
return ret;
if (cs >= PL35X_NAND_MAX_CS) {
dev_err(nfc->dev, "Wrong CS %d\n", cs);
return -EINVAL;
}
if (test_and_set_bit(cs, &nfc->assigned_cs)) {
dev_err(nfc->dev, "Already assigned CS %d\n", cs);
return -EINVAL;
}
plnand->cs = cs;
chip = &plnand->chip;
chip->options = NAND_BUSWIDTH_AUTO | NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE;
chip->bbt_options = NAND_BBT_USE_FLASH;
chip->controller = &nfc->controller;
mtd = nand_to_mtd(chip);
mtd->dev.parent = nfc->dev;
nand_set_flash_node(chip, np);
if (!mtd->name) {
mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
"%s", PL35X_NANDC_DRIVER_NAME);
if (!mtd->name) {
dev_err(nfc->dev, "Failed to allocate mtd->name\n");
return -ENOMEM;
}
}
ret = nand_scan(chip, 1);
if (ret)
return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
nand_cleanup(chip);
return ret;
}
list_add_tail(&plnand->node, &nfc->chips);
return ret;
}
static void pl35x_nand_chips_cleanup(struct pl35x_nandc *nfc)
{
struct pl35x_nand *plnand, *tmp;
struct nand_chip *chip;
int ret;
list_for_each_entry_safe(plnand, tmp, &nfc->chips, node) {
chip = &plnand->chip;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
list_del(&plnand->node);
}
}
static int pl35x_nand_chips_init(struct pl35x_nandc *nfc)
{
struct device_node *np = nfc->dev->of_node, *nand_np;
int nchips = of_get_child_count(np);
int ret;
if (!nchips || nchips > PL35X_NAND_MAX_CS) {
dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n",
nchips);
return -EINVAL;
}
for_each_child_of_node(np, nand_np) {
ret = pl35x_nand_chip_init(nfc, nand_np);
if (ret) {
of_node_put(nand_np);
pl35x_nand_chips_cleanup(nfc);
break;
}
}
return ret;
}
static int pl35x_nand_probe(struct platform_device *pdev)
{
struct device *smc_dev = pdev->dev.parent;
struct amba_device *smc_amba = to_amba_device(smc_dev);
struct pl35x_nandc *nfc;
int ret;
nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
if (!nfc)
return -ENOMEM;
nfc->dev = &pdev->dev;
nand_controller_init(&nfc->controller);
nfc->controller.ops = &pl35x_nandc_ops;
INIT_LIST_HEAD(&nfc->chips);
nfc->conf_regs = devm_ioremap_resource(&smc_amba->dev, &smc_amba->res);
if (IS_ERR(nfc->conf_regs))
return PTR_ERR(nfc->conf_regs);
nfc->io_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nfc->io_regs))
return PTR_ERR(nfc->io_regs);
ret = pl35x_nand_reset_state(nfc);
if (ret)
return ret;
ret = pl35x_nand_chips_init(nfc);
if (ret)
return ret;
platform_set_drvdata(pdev, nfc);
return 0;
}
static void pl35x_nand_remove(struct platform_device *pdev)
{
struct pl35x_nandc *nfc = platform_get_drvdata(pdev);
pl35x_nand_chips_cleanup(nfc);
}
static const struct of_device_id pl35x_nand_of_match[] = {
{ .compatible = "arm,pl353-nand-r2p1" },
{},
};
MODULE_DEVICE_TABLE(of, pl35x_nand_of_match);
static struct platform_driver pl35x_nandc_driver = {
.probe = pl35x_nand_probe,
.remove_new = pl35x_nand_remove,
.driver = {
.name = PL35X_NANDC_DRIVER_NAME,
.of_match_table = pl35x_nand_of_match,
},
};
module_platform_driver(pl35x_nandc_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_ALIAS("platform:" PL35X_NANDC_DRIVER_NAME);
MODULE_DESCRIPTION("ARM PL35X NAND controller driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/nand/raw/pl35x-nand-controller.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright © 2004-2008 Simtec Electronics
* http://armlinux.simtec.co.uk/
* Ben Dooks <[email protected]>
*
* Samsung S3C2410/S3C2440/S3C2412 NAND driver
*/
#define pr_fmt(fmt) "nand-s3c2410: " fmt
#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG
#define DEBUG
#endif
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_data/mtd-nand-s3c2410.h>
#define S3C2410_NFREG(x) (x)
#define S3C2410_NFCONF S3C2410_NFREG(0x00)
#define S3C2410_NFCMD S3C2410_NFREG(0x04)
#define S3C2410_NFADDR S3C2410_NFREG(0x08)
#define S3C2410_NFDATA S3C2410_NFREG(0x0C)
#define S3C2410_NFSTAT S3C2410_NFREG(0x10)
#define S3C2410_NFECC S3C2410_NFREG(0x14)
#define S3C2440_NFCONT S3C2410_NFREG(0x04)
#define S3C2440_NFCMD S3C2410_NFREG(0x08)
#define S3C2440_NFADDR S3C2410_NFREG(0x0C)
#define S3C2440_NFDATA S3C2410_NFREG(0x10)
#define S3C2440_NFSTAT S3C2410_NFREG(0x20)
#define S3C2440_NFMECC0 S3C2410_NFREG(0x2C)
#define S3C2412_NFSTAT S3C2410_NFREG(0x28)
#define S3C2412_NFMECC0 S3C2410_NFREG(0x34)
#define S3C2410_NFCONF_EN (1<<15)
#define S3C2410_NFCONF_INITECC (1<<12)
#define S3C2410_NFCONF_nFCE (1<<11)
#define S3C2410_NFCONF_TACLS(x) ((x)<<8)
#define S3C2410_NFCONF_TWRPH0(x) ((x)<<4)
#define S3C2410_NFCONF_TWRPH1(x) ((x)<<0)
#define S3C2410_NFSTAT_BUSY (1<<0)
#define S3C2440_NFCONF_TACLS(x) ((x)<<12)
#define S3C2440_NFCONF_TWRPH0(x) ((x)<<8)
#define S3C2440_NFCONF_TWRPH1(x) ((x)<<4)
#define S3C2440_NFCONT_INITECC (1<<4)
#define S3C2440_NFCONT_nFCE (1<<1)
#define S3C2440_NFCONT_ENABLE (1<<0)
#define S3C2440_NFSTAT_READY (1<<0)
#define S3C2412_NFCONF_NANDBOOT (1<<31)
#define S3C2412_NFCONT_INIT_MAIN_ECC (1<<5)
#define S3C2412_NFCONT_nFCE0 (1<<1)
#define S3C2412_NFSTAT_READY (1<<0)
/* new oob placement block for use with hardware ecc generation */
static int s3c2410_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section)
return -ERANGE;
oobregion->offset = 0;
oobregion->length = 3;
return 0;
}
static int s3c2410_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section)
return -ERANGE;
oobregion->offset = 8;
oobregion->length = 8;
return 0;
}
static const struct mtd_ooblayout_ops s3c2410_ooblayout_ops = {
.ecc = s3c2410_ooblayout_ecc,
.free = s3c2410_ooblayout_free,
};
/* controller and mtd information */
struct s3c2410_nand_info;
/**
* struct s3c2410_nand_mtd - driver MTD structure
* @mtd: The MTD instance to pass to the MTD layer.
* @chip: The NAND chip information.
* @set: The platform information supplied for this set of NAND chips.
* @info: Link back to the hardware information.
*/
struct s3c2410_nand_mtd {
struct nand_chip chip;
struct s3c2410_nand_set *set;
struct s3c2410_nand_info *info;
};
enum s3c_cpu_type {
TYPE_S3C2410,
TYPE_S3C2412,
TYPE_S3C2440,
};
enum s3c_nand_clk_state {
CLOCK_DISABLE = 0,
CLOCK_ENABLE,
CLOCK_SUSPEND,
};
/* overview of the s3c2410 nand state */
/**
* struct s3c2410_nand_info - NAND controller state.
* @controller: Base controller structure.
* @mtds: An array of MTD instances on this controller.
* @platform: The platform data for this board.
* @device: The platform device we bound to.
* @clk: The clock resource for this controller.
* @regs: The area mapped for the hardware registers.
* @sel_reg: Pointer to the register controlling the NAND selection.
* @sel_bit: The bit in @sel_reg to select the NAND chip.
* @mtd_count: The number of MTDs created from this controller.
* @save_sel: The contents of @sel_reg to be saved over suspend.
* @clk_rate: The clock rate from @clk.
* @clk_state: The current clock state.
* @cpu_type: The exact type of this controller.
* @freq_transition: CPUFreq notifier block
*/
struct s3c2410_nand_info {
/* mtd info */
struct nand_controller controller;
struct s3c2410_nand_mtd *mtds;
struct s3c2410_platform_nand *platform;
/* device info */
struct device *device;
struct clk *clk;
void __iomem *regs;
void __iomem *sel_reg;
int sel_bit;
int mtd_count;
unsigned long save_sel;
unsigned long clk_rate;
enum s3c_nand_clk_state clk_state;
enum s3c_cpu_type cpu_type;
};
struct s3c24XX_nand_devtype_data {
enum s3c_cpu_type type;
};
static const struct s3c24XX_nand_devtype_data s3c2410_nand_devtype_data = {
.type = TYPE_S3C2410,
};
static const struct s3c24XX_nand_devtype_data s3c2412_nand_devtype_data = {
.type = TYPE_S3C2412,
};
static const struct s3c24XX_nand_devtype_data s3c2440_nand_devtype_data = {
.type = TYPE_S3C2440,
};
/* conversion functions */
static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd)
{
return container_of(mtd_to_nand(mtd), struct s3c2410_nand_mtd,
chip);
}
static struct s3c2410_nand_info *s3c2410_nand_mtd_toinfo(struct mtd_info *mtd)
{
return s3c2410_nand_mtd_toours(mtd)->info;
}
static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev)
{
return platform_get_drvdata(dev);
}
static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
{
return dev_get_platdata(&dev->dev);
}
static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
{
#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
return 1;
#else
return 0;
#endif
}
/**
* s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock.
* @info: The controller instance.
* @new_state: State to which clock should be set.
*/
static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info,
enum s3c_nand_clk_state new_state)
{
if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND)
return;
if (info->clk_state == CLOCK_ENABLE) {
if (new_state != CLOCK_ENABLE)
clk_disable_unprepare(info->clk);
} else {
if (new_state == CLOCK_ENABLE)
clk_prepare_enable(info->clk);
}
info->clk_state = new_state;
}
/* timing calculations */
#define NS_IN_KHZ 1000000
/**
* s3c_nand_calc_rate - calculate timing data.
* @wanted: The cycle time in nanoseconds.
* @clk: The clock rate in kHz.
* @max: The maximum divider value.
*
* Calculate the timing value from the given parameters.
*/
static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
{
int result;
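/*
 * Worked example (illustrative values): with a 133000 kHz clock and a
 * wanted cycle time of 20 ns, result = DIV_ROUND_UP(20 * 133000, 1000000)
 * = 3 cycles.
 */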
result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ);
pr_debug("result %d from %ld, %d\n", result, clk, wanted);
if (result > max) {
pr_err("%d ns is too big for current clock rate %ld\n",
wanted, clk);
return -1;
}
if (result < 1)
result = 1;
return result;
}
#define to_ns(ticks, clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
/* controller setup */
/**
* s3c2410_nand_setrate - setup controller timing information.
* @info: The controller instance.
*
* Given the information supplied by the platform, calculate and set
* the necessary timing registers in the hardware to generate the
* necessary timing cycles to the hardware.
*/
static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
{
struct s3c2410_platform_nand *plat = info->platform;
int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
int tacls, twrph0, twrph1;
unsigned long clkrate = clk_get_rate(info->clk);
unsigned long set, cfg, mask;
unsigned long flags;
/* calculate the timing information for the controller */
info->clk_rate = clkrate;
clkrate /= 1000; /* turn clock into kHz for ease of use */
if (plat != NULL) {
tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max);
twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8);
twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8);
} else {
/* default timings */
tacls = tacls_max;
twrph0 = 8;
twrph1 = 8;
}
if (tacls < 0 || twrph0 < 0 || twrph1 < 0) {
dev_err(info->device, "cannot get suitable timings\n");
return -EINVAL;
}
dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate),
twrph1, to_ns(twrph1, clkrate));
switch (info->cpu_type) {
case TYPE_S3C2410:
mask = (S3C2410_NFCONF_TACLS(3) |
S3C2410_NFCONF_TWRPH0(7) |
S3C2410_NFCONF_TWRPH1(7));
set = S3C2410_NFCONF_EN;
set |= S3C2410_NFCONF_TACLS(tacls - 1);
set |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
set |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
break;
case TYPE_S3C2440:
case TYPE_S3C2412:
mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) |
S3C2440_NFCONF_TWRPH0(7) |
S3C2440_NFCONF_TWRPH1(7));
set = S3C2440_NFCONF_TACLS(tacls - 1);
set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
set |= S3C2440_NFCONF_TWRPH1(twrph1 - 1);
break;
default:
BUG();
}
local_irq_save(flags);
cfg = readl(info->regs + S3C2410_NFCONF);
cfg &= ~mask;
cfg |= set;
writel(cfg, info->regs + S3C2410_NFCONF);
local_irq_restore(flags);
dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
return 0;
}
/**
* s3c2410_nand_inithw - basic hardware initialisation
* @info: The hardware state.
*
* Do the basic initialisation of the hardware, using s3c2410_nand_setrate()
* to setup the hardware access speeds and set the controller to be enabled.
*/
static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
{
int ret;
ret = s3c2410_nand_setrate(info);
if (ret < 0)
return ret;
switch (info->cpu_type) {
case TYPE_S3C2410:
default:
break;
case TYPE_S3C2440:
case TYPE_S3C2412:
/* enable the controller and de-assert nFCE */
writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
}
return 0;
}
/**
* s3c2410_nand_select_chip - select the given nand chip
* @this: NAND chip object.
* @chip: The chip number.
*
* This is called by the MTD layer to either select a given chip for the
* @mtd instance, or to indicate that the access has finished and the
* chip can be de-selected.
*
* The routine ensures that the nFCE line is correctly setup, and any
* platform specific selection code is called to route nFCE to the specific
* chip.
*/
static void s3c2410_nand_select_chip(struct nand_chip *this, int chip)
{
struct s3c2410_nand_info *info;
struct s3c2410_nand_mtd *nmtd;
unsigned long cur;
nmtd = nand_get_controller_data(this);
info = nmtd->info;
if (chip != -1)
s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
cur = readl(info->sel_reg);
if (chip == -1) {
cur |= info->sel_bit;
} else {
if (nmtd->set != NULL && chip > nmtd->set->nr_chips) {
dev_err(info->device, "invalid chip %d\n", chip);
return;
}
if (info->platform != NULL) {
if (info->platform->select_chip != NULL)
(info->platform->select_chip) (nmtd->set, chip);
}
cur &= ~info->sel_bit;
}
writel(cur, info->sel_reg);
if (chip == -1)
s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
}
/* s3c2410_nand_hwcontrol
*
* Issue command and address cycles to the chip
*/
static void s3c2410_nand_hwcontrol(struct nand_chip *chip, int cmd,
unsigned int ctrl)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
if (cmd == NAND_CMD_NONE)
return;
if (ctrl & NAND_CLE)
writeb(cmd, info->regs + S3C2410_NFCMD);
else
writeb(cmd, info->regs + S3C2410_NFADDR);
}
/* command and control functions */
static void s3c2440_nand_hwcontrol(struct nand_chip *chip, int cmd,
unsigned int ctrl)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
if (cmd == NAND_CMD_NONE)
return;
if (ctrl & NAND_CLE)
writeb(cmd, info->regs + S3C2440_NFCMD);
else
writeb(cmd, info->regs + S3C2440_NFADDR);
}
/* s3c2410_nand_devready()
*
* returns 0 if the nand is busy, 1 if it is ready
*/
static int s3c2410_nand_devready(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY;
}
static int s3c2440_nand_devready(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY;
}
static int s3c2412_nand_devready(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY;
}
/* ECC handling functions */
static int s3c2410_nand_correct_data(struct nand_chip *chip, u_char *dat,
u_char *read_ecc, u_char *calc_ecc)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
unsigned int diff0, diff1, diff2;
unsigned int bit, byte;
pr_debug("%s(%p,%p,%p,%p)\n", __func__, mtd, dat, read_ecc, calc_ecc);
diff0 = read_ecc[0] ^ calc_ecc[0];
diff1 = read_ecc[1] ^ calc_ecc[1];
diff2 = read_ecc[2] ^ calc_ecc[2];
pr_debug("%s: rd %*phN calc %*phN diff %02x%02x%02x\n",
__func__, 3, read_ecc, 3, calc_ecc,
diff0, diff1, diff2);
if (diff0 == 0 && diff1 == 0 && diff2 == 0)
return 0; /* ECC is ok */
/* sometimes people do not think about using the ECC, so check
* to see if we have a 0xff,0xff,0xff read ECC and then ignore
* the error, on the assumption that this is an un-ECCed page.
*/
if (read_ecc[0] == 0xff && read_ecc[1] == 0xff && read_ecc[2] == 0xff
&& info->platform->ignore_unset_ecc)
return 0;
/* Can we correct this ECC (ie, one row and column change).
* Note, this is similar to the 256 error code on smartmedia */
if (((diff0 ^ (diff0 >> 1)) & 0x55) == 0x55 &&
((diff1 ^ (diff1 >> 1)) & 0x55) == 0x55 &&
((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) {
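/*
 * For a correctable single-bit error every parity/parity' pair differs
 * in exactly one of its two bits, so XORing each diff byte with itself
 * shifted right by one sets all the even bit positions, which is what
 * the 0x55 masks above verify.
 */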
/* calculate the bit position of the error */
bit = ((diff2 >> 3) & 1) |
((diff2 >> 4) & 2) |
((diff2 >> 5) & 4);
/* calculate the byte position of the error */
byte = ((diff2 << 7) & 0x100) |
((diff1 << 0) & 0x80) |
((diff1 << 1) & 0x40) |
((diff1 << 2) & 0x20) |
((diff1 << 3) & 0x10) |
((diff0 >> 4) & 0x08) |
((diff0 >> 3) & 0x04) |
((diff0 >> 2) & 0x02) |
((diff0 >> 1) & 0x01);
dev_dbg(info->device, "correcting error bit %d, byte %d\n",
bit, byte);
dat[byte] ^= (1 << bit);
return 1;
}
/* if there is only one bit difference in the ECC, then
* only a single row or column parity bit has changed, which
* means the error is most probably in the ECC itself */
diff0 |= (diff1 << 8);
diff0 |= (diff2 << 16);
/* equal to "(diff0 & ~(1 << __ffs(diff0)))" */
if ((diff0 & (diff0 - 1)) == 0)
return 1;
return -1;
}
/* ECC functions
*
* These allow the s3c2410 and s3c2440 to use the controller's ECC
* generator block to ECC the data as it passes through.
*/
static void s3c2410_nand_enable_hwecc(struct nand_chip *chip, int mode)
{
struct s3c2410_nand_info *info;
unsigned long ctrl;
info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
ctrl = readl(info->regs + S3C2410_NFCONF);
ctrl |= S3C2410_NFCONF_INITECC;
writel(ctrl, info->regs + S3C2410_NFCONF);
}
static void s3c2412_nand_enable_hwecc(struct nand_chip *chip, int mode)
{
struct s3c2410_nand_info *info;
unsigned long ctrl;
info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
ctrl = readl(info->regs + S3C2440_NFCONT);
writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC,
info->regs + S3C2440_NFCONT);
}
static void s3c2440_nand_enable_hwecc(struct nand_chip *chip, int mode)
{
struct s3c2410_nand_info *info;
unsigned long ctrl;
info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
ctrl = readl(info->regs + S3C2440_NFCONT);
writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT);
}
static int s3c2410_nand_calculate_ecc(struct nand_chip *chip,
const u_char *dat, u_char *ecc_code)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
ecc_code[0] = readb(info->regs + S3C2410_NFECC + 0);
ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1);
ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2);
pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
return 0;
}
static int s3c2412_nand_calculate_ecc(struct nand_chip *chip,
const u_char *dat, u_char *ecc_code)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
unsigned long ecc = readl(info->regs + S3C2412_NFMECC0);
ecc_code[0] = ecc;
ecc_code[1] = ecc >> 8;
ecc_code[2] = ecc >> 16;
pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
return 0;
}
static int s3c2440_nand_calculate_ecc(struct nand_chip *chip,
const u_char *dat, u_char *ecc_code)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
unsigned long ecc = readl(info->regs + S3C2440_NFMECC0);
ecc_code[0] = ecc;
ecc_code[1] = ecc >> 8;
ecc_code[2] = ecc >> 16;
pr_debug("%s: returning ecc %06lx\n", __func__, ecc & 0xffffff);
return 0;
}
/* over-ride the standard functions for a little more speed. We can
* use read/write block to move the data buffers to/from the controller
*/
static void s3c2410_nand_read_buf(struct nand_chip *this, u_char *buf, int len)
{
readsb(this->legacy.IO_ADDR_R, buf, len);
}
static void s3c2440_nand_read_buf(struct nand_chip *this, u_char *buf, int len)
{
struct mtd_info *mtd = nand_to_mtd(this);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
readsl(info->regs + S3C2440_NFDATA, buf, len >> 2);
/* cleanup if we've got less than a word to do */
if (len & 3) {
buf += len & ~3;
for (; len & 3; len--)
*buf++ = readb(info->regs + S3C2440_NFDATA);
}
}
static void s3c2410_nand_write_buf(struct nand_chip *this, const u_char *buf,
int len)
{
writesb(this->legacy.IO_ADDR_W, buf, len);
}
static void s3c2440_nand_write_buf(struct nand_chip *this, const u_char *buf,
int len)
{
struct mtd_info *mtd = nand_to_mtd(this);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
writesl(info->regs + S3C2440_NFDATA, buf, len >> 2);
/* cleanup any fractional write */
if (len & 3) {
buf += len & ~3;
for (; len & 3; len--, buf++)
writeb(*buf, info->regs + S3C2440_NFDATA);
}
}
/* device management functions */
static void s3c24xx_nand_remove(struct platform_device *pdev)
{
struct s3c2410_nand_info *info = to_nand_info(pdev);
if (info == NULL)
return;
/* Release all our mtds and their partitions, then go through
* freeing the resources used
*/
if (info->mtds != NULL) {
struct s3c2410_nand_mtd *ptr = info->mtds;
int mtdno;
for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) {
pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
WARN_ON(mtd_device_unregister(nand_to_mtd(&ptr->chip)));
nand_cleanup(&ptr->chip);
}
}
/* free the common resources */
if (!IS_ERR(info->clk))
s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
}
static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *mtd,
struct s3c2410_nand_set *set)
{
if (set) {
struct mtd_info *mtdinfo = nand_to_mtd(&mtd->chip);
mtdinfo->name = set->name;
return mtd_device_register(mtdinfo, set->partitions,
set->nr_partitions);
}
return -ENODEV;
}
static int s3c2410_nand_setup_interface(struct nand_chip *chip, int csline,
const struct nand_interface_config *conf)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
struct s3c2410_platform_nand *pdata = info->platform;
const struct nand_sdr_timings *timings;
int tacls;
timings = nand_get_sdr_timings(conf);
if (IS_ERR(timings))
return -ENOTSUPP;
tacls = timings->tCLS_min - timings->tWP_min;
if (tacls < 0)
tacls = 0;
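/*
 * The SDR timings are expressed in picoseconds while the platform data
 * fields are in nanoseconds, hence the DIV_ROUND_UP(x, 1000) conversions
 * below.
 */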
pdata->tacls = DIV_ROUND_UP(tacls, 1000);
pdata->twrph0 = DIV_ROUND_UP(timings->tWP_min, 1000);
pdata->twrph1 = DIV_ROUND_UP(timings->tCLH_min, 1000);
return s3c2410_nand_setrate(info);
}
/**
* s3c2410_nand_init_chip - initialise a single instance of an chip
* @info: The base NAND controller the chip is on.
* @nmtd: The new controller MTD instance to fill in.
* @set: The information passed from the board specific platform data.
*
* Initialise the given @nmtd from the information in @info and @set. This
* readies the structure for use with the MTD layer functions by ensuring
* all pointers are setup and the necessary control routines selected.
*/
static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *nmtd,
struct s3c2410_nand_set *set)
{
struct device_node *np = info->device->of_node;
struct nand_chip *chip = &nmtd->chip;
void __iomem *regs = info->regs;
nand_set_flash_node(chip, set->of_node);
chip->legacy.write_buf = s3c2410_nand_write_buf;
chip->legacy.read_buf = s3c2410_nand_read_buf;
chip->legacy.select_chip = s3c2410_nand_select_chip;
chip->legacy.chip_delay = 50;
nand_set_controller_data(chip, nmtd);
chip->options = set->options;
chip->controller = &info->controller;
/*
* let's keep behavior unchanged for legacy boards booting via pdata and
* auto-detect timings only when booting with a device tree.
*/
if (!np)
chip->options |= NAND_KEEP_TIMINGS;
switch (info->cpu_type) {
case TYPE_S3C2410:
chip->legacy.IO_ADDR_W = regs + S3C2410_NFDATA;
info->sel_reg = regs + S3C2410_NFCONF;
info->sel_bit = S3C2410_NFCONF_nFCE;
chip->legacy.cmd_ctrl = s3c2410_nand_hwcontrol;
chip->legacy.dev_ready = s3c2410_nand_devready;
break;
case TYPE_S3C2440:
chip->legacy.IO_ADDR_W = regs + S3C2440_NFDATA;
info->sel_reg = regs + S3C2440_NFCONT;
info->sel_bit = S3C2440_NFCONT_nFCE;
chip->legacy.cmd_ctrl = s3c2440_nand_hwcontrol;
chip->legacy.dev_ready = s3c2440_nand_devready;
chip->legacy.read_buf = s3c2440_nand_read_buf;
chip->legacy.write_buf = s3c2440_nand_write_buf;
break;
case TYPE_S3C2412:
chip->legacy.IO_ADDR_W = regs + S3C2440_NFDATA;
info->sel_reg = regs + S3C2440_NFCONT;
info->sel_bit = S3C2412_NFCONT_nFCE0;
chip->legacy.cmd_ctrl = s3c2440_nand_hwcontrol;
chip->legacy.dev_ready = s3c2412_nand_devready;
if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT)
dev_info(info->device, "System booted from NAND\n");
break;
}
chip->legacy.IO_ADDR_R = chip->legacy.IO_ADDR_W;
nmtd->info = info;
nmtd->set = set;
chip->ecc.engine_type = info->platform->engine_type;
/*
* If you use u-boot BBT creation code, specifying this flag will
* let the kernel fish out the BBT from the NAND.
*/
if (set->flash_bbt)
chip->bbt_options |= NAND_BBT_USE_FLASH;
}
/**
* s3c2410_nand_attach_chip - Init the ECC engine after NAND scan
* @chip: The NAND chip
*
* This hook is called by the core after the identification of the NAND chip,
* once the relevant per-chip information is up to date. This call ensures that
* we update the internal state accordingly.
*
* The internal state is currently limited to the ECC state information.
*/
static int s3c2410_nand_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
switch (chip->ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_NONE:
dev_info(info->device, "ECC disabled\n");
break;
case NAND_ECC_ENGINE_TYPE_SOFT:
/*
* This driver expects Hamming based ECC when engine_type is set
* to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to
* NAND_ECC_ALGO_HAMMING to avoid adding an extra ecc_algo field
* to s3c2410_platform_nand.
*/
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
dev_info(info->device, "soft ECC\n");
break;
case NAND_ECC_ENGINE_TYPE_ON_HOST:
chip->ecc.calculate = s3c2410_nand_calculate_ecc;
chip->ecc.correct = s3c2410_nand_correct_data;
chip->ecc.strength = 1;
switch (info->cpu_type) {
case TYPE_S3C2410:
chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
chip->ecc.calculate = s3c2410_nand_calculate_ecc;
break;
case TYPE_S3C2412:
chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
chip->ecc.calculate = s3c2412_nand_calculate_ecc;
break;
case TYPE_S3C2440:
chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
chip->ecc.calculate = s3c2440_nand_calculate_ecc;
break;
}
dev_dbg(info->device, "chip %p => page shift %d\n",
chip, chip->page_shift);
/* change the behaviour depending on whether we are using
* the large or small page nand device */
if (chip->page_shift > 10) {
chip->ecc.size = 256;
chip->ecc.bytes = 3;
} else {
chip->ecc.size = 512;
chip->ecc.bytes = 3;
mtd_set_ooblayout(nand_to_mtd(chip),
&s3c2410_ooblayout_ops);
}
dev_info(info->device, "hardware ECC\n");
break;
default:
dev_err(info->device, "invalid ECC mode!\n");
return -EINVAL;
}
if (chip->bbt_options & NAND_BBT_USE_FLASH)
chip->options |= NAND_SKIP_BBTSCAN;
return 0;
}
static const struct nand_controller_ops s3c24xx_nand_controller_ops = {
.attach_chip = s3c2410_nand_attach_chip,
.setup_interface = s3c2410_nand_setup_interface,
};
static const struct of_device_id s3c24xx_nand_dt_ids[] = {
{
.compatible = "samsung,s3c2410-nand",
.data = &s3c2410_nand_devtype_data,
}, {
/* also compatible with s3c6400 */
.compatible = "samsung,s3c2412-nand",
.data = &s3c2412_nand_devtype_data,
}, {
.compatible = "samsung,s3c2440-nand",
.data = &s3c2440_nand_devtype_data,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, s3c24xx_nand_dt_ids);
static int s3c24xx_nand_probe_dt(struct platform_device *pdev)
{
const struct s3c24XX_nand_devtype_data *devtype_data;
struct s3c2410_platform_nand *pdata;
struct s3c2410_nand_info *info = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node, *child;
struct s3c2410_nand_set *sets;
devtype_data = of_device_get_match_data(&pdev->dev);
if (!devtype_data)
return -ENODEV;
info->cpu_type = devtype_data->type;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdev->dev.platform_data = pdata;
pdata->nr_sets = of_get_child_count(np);
if (!pdata->nr_sets)
return 0;
sets = devm_kcalloc(&pdev->dev, pdata->nr_sets, sizeof(*sets),
GFP_KERNEL);
if (!sets)
return -ENOMEM;
pdata->sets = sets;
for_each_available_child_of_node(np, child) {
sets->name = (char *)child->name;
sets->of_node = child;
sets->nr_chips = 1;
of_node_get(child);
sets++;
}
return 0;
}
static int s3c24xx_nand_probe_pdata(struct platform_device *pdev)
{
struct s3c2410_nand_info *info = platform_get_drvdata(pdev);
info->cpu_type = platform_get_device_id(pdev)->driver_data;
return 0;
}
/* s3c24xx_nand_probe
*
* called by device layer when it finds a device matching
* one our driver can handle. This code checks to see if
* it can allocate all necessary resources then calls the
* nand layer to look for devices
*/
static int s3c24xx_nand_probe(struct platform_device *pdev)
{
struct s3c2410_platform_nand *plat;
struct s3c2410_nand_info *info;
struct s3c2410_nand_mtd *nmtd;
struct s3c2410_nand_set *sets;
struct resource *res;
int err = 0;
int size;
int nr_sets;
int setno;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (info == NULL) {
err = -ENOMEM;
goto exit_error;
}
platform_set_drvdata(pdev, info);
nand_controller_init(&info->controller);
info->controller.ops = &s3c24xx_nand_controller_ops;
/* get the clock source and enable it */
info->clk = devm_clk_get(&pdev->dev, "nand");
if (IS_ERR(info->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
err = -ENOENT;
goto exit_error;
}
s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
if (pdev->dev.of_node)
err = s3c24xx_nand_probe_dt(pdev);
else
err = s3c24xx_nand_probe_pdata(pdev);
if (err)
goto exit_error;
plat = to_nand_plat(pdev);
/* allocate and map the resource */
/* currently we assume we have the one resource */
res = pdev->resource;
size = resource_size(res);
info->device = &pdev->dev;
info->platform = plat;
info->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(info->regs)) {
err = PTR_ERR(info->regs);
goto exit_error;
}
dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs);
if (!plat->sets || plat->nr_sets < 1) {
err = -EINVAL;
goto exit_error;
}
sets = plat->sets;
nr_sets = plat->nr_sets;
info->mtd_count = nr_sets;
/* allocate our information */
size = nr_sets * sizeof(*info->mtds);
info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (info->mtds == NULL) {
err = -ENOMEM;
goto exit_error;
}
/* initialise all possible chips */
nmtd = info->mtds;
for (setno = 0; setno < nr_sets; setno++, nmtd++, sets++) {
struct mtd_info *mtd = nand_to_mtd(&nmtd->chip);
pr_debug("initialising set %d (%p, info %p)\n",
setno, nmtd, info);
mtd->dev.parent = &pdev->dev;
s3c2410_nand_init_chip(info, nmtd, sets);
err = nand_scan(&nmtd->chip, sets ? sets->nr_chips : 1);
if (err)
goto exit_error;
s3c2410_nand_add_partition(info, nmtd, sets);
}
/* initialise the hardware */
err = s3c2410_nand_inithw(info);
if (err != 0)
goto exit_error;
if (allow_clk_suspend(info)) {
dev_info(&pdev->dev, "clock idle support enabled\n");
s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
}
return 0;
exit_error:
s3c24xx_nand_remove(pdev);
if (err == 0)
err = -EINVAL;
return err;
}
/* PM Support */
#ifdef CONFIG_PM
static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
{
struct s3c2410_nand_info *info = platform_get_drvdata(dev);
if (info) {
info->save_sel = readl(info->sel_reg);
/* For the moment, we must ensure nFCE is high during
* the time we are suspended. This really should be
* handled by suspending the MTDs we are using, but
* that is currently not the case. */
writel(info->save_sel | info->sel_bit, info->sel_reg);
s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
}
return 0;
}
static int s3c24xx_nand_resume(struct platform_device *dev)
{
struct s3c2410_nand_info *info = platform_get_drvdata(dev);
unsigned long sel;
if (info) {
s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
s3c2410_nand_inithw(info);
/* Restore the state of the nFCE line. */
sel = readl(info->sel_reg);
sel &= ~info->sel_bit;
sel |= info->save_sel & info->sel_bit;
writel(sel, info->sel_reg);
s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
}
return 0;
}
#else
#define s3c24xx_nand_suspend NULL
#define s3c24xx_nand_resume NULL
#endif
/* driver device registration */
static const struct platform_device_id s3c24xx_driver_ids[] = {
{
.name = "s3c2410-nand",
.driver_data = TYPE_S3C2410,
}, {
.name = "s3c2440-nand",
.driver_data = TYPE_S3C2440,
}, {
.name = "s3c2412-nand",
.driver_data = TYPE_S3C2412,
}, {
.name = "s3c6400-nand",
.driver_data = TYPE_S3C2412, /* compatible with 2412 */
},
{ }
};
MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
static struct platform_driver s3c24xx_nand_driver = {
.probe = s3c24xx_nand_probe,
.remove_new = s3c24xx_nand_remove,
.suspend = s3c24xx_nand_suspend,
.resume = s3c24xx_nand_resume,
.id_table = s3c24xx_driver_ids,
.driver = {
.name = "s3c24xx-nand",
.of_match_table = s3c24xx_nand_dt_ids,
},
};
module_platform_driver(s3c24xx_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <[email protected]>");
MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
| linux-master | drivers/mtd/nand/raw/s3c2410.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2005, 2006 Red Hat Inc.
*
* Author: David Woodhouse <[email protected]>
* Tom Sylla <[email protected]>
*
* Overview:
* This is a device driver for the NAND flash controller found on
* the AMD CS5535/CS5536 companion chipsets for the Geode processor.
* mtd-id for command line partitioning is cs553x_nand_cs[0-3]
* where 0-3 reflects the chip select for NAND.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/iopoll.h>
#include <asm/msr.h>
#define NR_CS553X_CONTROLLERS 4
#define MSR_DIVIL_GLD_CAP 0x51400000 /* DIVIL capabilities */
#define CAP_CS5535 0x2df000ULL
#define CAP_CS5536 0x5df500ULL
/* NAND Timing MSRs */
#define MSR_NANDF_DATA 0x5140001b /* NAND Flash Data Timing MSR */
#define MSR_NANDF_CTL 0x5140001c /* NAND Flash Control Timing */
#define MSR_NANDF_RSVD 0x5140001d /* Reserved */
/* NAND BAR MSRs */
#define MSR_DIVIL_LBAR_FLSH0 0x51400010 /* Flash Chip Select 0 */
#define MSR_DIVIL_LBAR_FLSH1 0x51400011 /* Flash Chip Select 1 */
#define MSR_DIVIL_LBAR_FLSH2 0x51400012 /* Flash Chip Select 2 */
#define MSR_DIVIL_LBAR_FLSH3 0x51400013 /* Flash Chip Select 3 */
/* Each made up of... */
#define FLSH_LBAR_EN (1ULL<<32)
#define FLSH_NOR_NAND (1ULL<<33) /* 1 for NAND */
#define FLSH_MEM_IO (1ULL<<34) /* 1 for MMIO */
/* I/O BARs have BASE_ADDR in bits 15:4, IO_MASK in 47:36 */
/* MMIO BARs have BASE_ADDR in bits 31:12, MEM_MASK in 63:44 */
/* Pin function selection MSR (IDE vs. flash on the IDE pins) */
#define MSR_DIVIL_BALL_OPTS 0x51400015
#define PIN_OPT_IDE (1<<0) /* 0 for flash, 1 for IDE */
/* Registers within the NAND flash controller BAR -- memory mapped */
#define MM_NAND_DATA 0x00 /* 0 to 0x7ff, in fact */
#define MM_NAND_CTL 0x800 /* Any even address 0x800-0x80e */
#define MM_NAND_IO 0x801 /* Any odd address 0x801-0x80f */
#define MM_NAND_STS 0x810
#define MM_NAND_ECC_LSB 0x811
#define MM_NAND_ECC_MSB 0x812
#define MM_NAND_ECC_COL 0x813
#define MM_NAND_LAC 0x814
#define MM_NAND_ECC_CTL 0x815
/* Registers within the NAND flash controller BAR -- I/O mapped */
#define IO_NAND_DATA 0x00 /* 0 to 3, in fact */
#define IO_NAND_CTL 0x04
#define IO_NAND_IO 0x05
#define IO_NAND_STS 0x06
#define IO_NAND_ECC_CTL 0x08
#define IO_NAND_ECC_LSB 0x09
#define IO_NAND_ECC_MSB 0x0a
#define IO_NAND_ECC_COL 0x0b
#define IO_NAND_LAC 0x0c
#define CS_NAND_CTL_DIST_EN (1<<4) /* Enable NAND Distract interrupt */
#define CS_NAND_CTL_RDY_INT_MASK (1<<3) /* Enable RDY/BUSY# interrupt */
#define CS_NAND_CTL_ALE (1<<2)
#define CS_NAND_CTL_CLE (1<<1)
#define CS_NAND_CTL_CE (1<<0) /* Keep low; 1 to reset */
#define CS_NAND_STS_FLASH_RDY (1<<3)
#define CS_NAND_CTLR_BUSY (1<<2)
#define CS_NAND_CMD_COMP (1<<1)
#define CS_NAND_DIST_ST (1<<0)
#define CS_NAND_ECC_PARITY (1<<2)
#define CS_NAND_ECC_CLRECC (1<<1)
#define CS_NAND_ECC_ENECC (1<<0)
struct cs553x_nand_controller {
struct nand_controller base;
struct nand_chip chip;
void __iomem *mmio;
};
static struct cs553x_nand_controller *
to_cs553x(struct nand_controller *controller)
{
return container_of(controller, struct cs553x_nand_controller, base);
}
static int cs553x_write_ctrl_byte(struct cs553x_nand_controller *cs553x,
u32 ctl, u8 data)
{
u8 status;
writeb(ctl, cs553x->mmio + MM_NAND_CTL);
writeb(data, cs553x->mmio + MM_NAND_IO);
return readb_poll_timeout_atomic(cs553x->mmio + MM_NAND_STS, status,
!(status & CS_NAND_CTLR_BUSY), 1,
100000);
}
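/*
 * The memory-mapped data window is 2 KiB wide (offsets 0x000-0x7ff, see
 * MM_NAND_DATA above), so larger transfers are split into 0x800-byte
 * chunks.
 */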
static void cs553x_data_in(struct cs553x_nand_controller *cs553x, void *buf,
unsigned int len)
{
writeb(0, cs553x->mmio + MM_NAND_CTL);
while (unlikely(len > 0x800)) {
memcpy_fromio(buf, cs553x->mmio, 0x800);
buf += 0x800;
len -= 0x800;
}
memcpy_fromio(buf, cs553x->mmio, len);
}
static void cs553x_data_out(struct cs553x_nand_controller *cs553x,
const void *buf, unsigned int len)
{
writeb(0, cs553x->mmio + MM_NAND_CTL);
while (unlikely(len > 0x800)) {
memcpy_toio(cs553x->mmio, buf, 0x800);
buf += 0x800;
len -= 0x800;
}
memcpy_toio(cs553x->mmio, buf, len);
}
static int cs553x_wait_ready(struct cs553x_nand_controller *cs553x,
unsigned int timeout_ms)
{
u8 mask = CS_NAND_CTLR_BUSY | CS_NAND_STS_FLASH_RDY;
u8 status;
return readb_poll_timeout(cs553x->mmio + MM_NAND_STS, status,
(status & mask) == CS_NAND_STS_FLASH_RDY, 100,
timeout_ms * 1000);
}
static int cs553x_exec_instr(struct cs553x_nand_controller *cs553x,
const struct nand_op_instr *instr)
{
unsigned int i;
int ret = 0;
switch (instr->type) {
case NAND_OP_CMD_INSTR:
ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_CLE,
instr->ctx.cmd.opcode);
break;
case NAND_OP_ADDR_INSTR:
for (i = 0; i < instr->ctx.addr.naddrs; i++) {
ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_ALE,
instr->ctx.addr.addrs[i]);
if (ret)
break;
}
break;
case NAND_OP_DATA_IN_INSTR:
cs553x_data_in(cs553x, instr->ctx.data.buf.in,
instr->ctx.data.len);
break;
case NAND_OP_DATA_OUT_INSTR:
cs553x_data_out(cs553x, instr->ctx.data.buf.out,
instr->ctx.data.len);
break;
case NAND_OP_WAITRDY_INSTR:
ret = cs553x_wait_ready(cs553x, instr->ctx.waitrdy.timeout_ms);
break;
}
if (instr->delay_ns)
ndelay(instr->delay_ns);
return ret;
}
static int cs553x_exec_op(struct nand_chip *this,
const struct nand_operation *op,
bool check_only)
{
struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
unsigned int i;
int ret;
if (check_only)
return true;
/* De-assert the CE pin */
writeb(0, cs553x->mmio + MM_NAND_CTL);
for (i = 0; i < op->ninstrs; i++) {
ret = cs553x_exec_instr(cs553x, &op->instrs[i]);
if (ret)
break;
}
/* Re-assert the CE pin. */
writeb(CS_NAND_CTL_CE, cs553x->mmio + MM_NAND_CTL);
return ret;
}
static void cs_enable_hwecc(struct nand_chip *this, int mode)
{
struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
writeb(0x07, cs553x->mmio + MM_NAND_ECC_CTL);
}
static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat,
u_char *ecc_code)
{
struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
uint32_t ecc;
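/*
 * A single 32-bit read at MM_NAND_STS also fetches the neighbouring
 * ECC_LSB (bits 15:8), ECC_MSB (23:16) and ECC_COL (31:24) registers
 * defined above, which are then split into the three ECC bytes.
 */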
ecc = readl(cs553x->mmio + MM_NAND_STS);
ecc_code[1] = ecc >> 8;
ecc_code[0] = ecc >> 16;
ecc_code[2] = ecc >> 24;
return 0;
}
static struct cs553x_nand_controller *controllers[4];
static int cs553x_attach_chip(struct nand_chip *chip)
{
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return 0;
chip->ecc.size = 256;
chip->ecc.bytes = 3;
chip->ecc.hwctl = cs_enable_hwecc;
chip->ecc.calculate = cs_calculate_ecc;
chip->ecc.correct = rawnand_sw_hamming_correct;
chip->ecc.strength = 1;
return 0;
}
static const struct nand_controller_ops cs553x_nand_controller_ops = {
.exec_op = cs553x_exec_op,
.attach_chip = cs553x_attach_chip,
};
static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
{
struct cs553x_nand_controller *controller;
int err = 0;
struct nand_chip *this;
struct mtd_info *new_mtd;
pr_notice("Probing CS553x NAND controller CS#%d at %sIO 0x%08lx\n",
cs, mmio ? "MM" : "P", adr);
if (!mmio) {
pr_notice("PIO mode not yet implemented for CS553X NAND controller\n");
return -ENXIO;
}
/* Allocate memory for MTD device structure and private data */
controller = kzalloc(sizeof(*controller), GFP_KERNEL);
if (!controller) {
err = -ENOMEM;
goto out;
}
this = &controller->chip;
nand_controller_init(&controller->base);
controller->base.ops = &cs553x_nand_controller_ops;
this->controller = &controller->base;
new_mtd = nand_to_mtd(this);
/* Link the private data with the MTD structure */
new_mtd->owner = THIS_MODULE;
/* map physical address */
controller->mmio = ioremap(adr, 4096);
if (!controller->mmio) {
pr_warn("ioremap cs553x NAND @0x%08lx failed\n", adr);
err = -EIO;
goto out_mtd;
}
/* Enable the following for a flash based bad block table */
this->bbt_options = NAND_BBT_USE_FLASH;
new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
if (!new_mtd->name) {
err = -ENOMEM;
goto out_ior;
}
/* Scan to find existence of the device */
err = nand_scan(this, 1);
if (err)
goto out_free;
controllers[cs] = controller;
goto out;
out_free:
kfree(new_mtd->name);
out_ior:
iounmap(controller->mmio);
out_mtd:
kfree(controller);
out:
return err;
}
static int is_geode(void)
{
/* These are the CPUs which will have a CS553[56] companion chip */
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
boot_cpu_data.x86 == 5 &&
boot_cpu_data.x86_model == 10)
return 1; /* Geode LX */
if ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC ||
boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX) &&
boot_cpu_data.x86 == 5 &&
boot_cpu_data.x86_model == 5)
return 1; /* Geode GX (née GX2) */
return 0;
}
static int __init cs553x_init(void)
{
int err = -ENXIO;
int i;
uint64_t val;
/* If the CPU isn't a Geode GX or LX, abort */
if (!is_geode())
return -ENXIO;
/* If it doesn't have the CS553[56], abort */
rdmsrl(MSR_DIVIL_GLD_CAP, val);
val &= ~0xFFULL;
if (val != CAP_CS5535 && val != CAP_CS5536)
return -ENXIO;
/* If it doesn't have the NAND controller enabled, abort */
rdmsrl(MSR_DIVIL_BALL_OPTS, val);
if (val & PIN_OPT_IDE) {
pr_info("CS553x NAND controller: Flash I/O not enabled in MSR_DIVIL_BALL_OPTS.\n");
return -ENXIO;
}
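/*
 * Probe only the chip selects whose flash BAR is both enabled and
 * configured for NAND (rather than NOR); FLSH_MEM_IO selects MMIO vs.
 * I/O mapping and the low 32 bits hold the base address.
 */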
for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
rdmsrl(MSR_DIVIL_LBAR_FLSH0 + i, val);
if ((val & (FLSH_LBAR_EN|FLSH_NOR_NAND)) == (FLSH_LBAR_EN|FLSH_NOR_NAND))
err = cs553x_init_one(i, !!(val & FLSH_MEM_IO), val & 0xFFFFFFFF);
}
/*
 * Register all devices together here. This means we can easily hack it to
 * do mtdconcat etc. if we want to.
 */
for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
if (controllers[i]) {
/* If any devices registered, return success. Else the last error. */
mtd_device_register(nand_to_mtd(&controllers[i]->chip),
NULL, 0);
err = 0;
}
}
return err;
}
module_init(cs553x_init);
static void __exit cs553x_cleanup(void)
{
int i;
for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
struct cs553x_nand_controller *controller = controllers[i];
struct nand_chip *this;
struct mtd_info *mtd;
int ret;
/* Skip chip selects that were never successfully probed */
if (!controller)
continue;
this = &controller->chip;
mtd = nand_to_mtd(this);
/* Release resources, unregister device */
ret = mtd_device_unregister(mtd);
WARN_ON(ret);
nand_cleanup(this);
kfree(mtd->name);
controllers[i] = NULL;
/* unmap physical address */
iounmap(controller->mmio);
/* Free the MTD device structure */
kfree(controller);
}
}
module_exit(cs553x_cleanup);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <[email protected]>");
MODULE_DESCRIPTION("NAND controller driver for AMD CS5535/CS5536 companion chip");
| linux-master | drivers/mtd/nand/raw/cs553x_nand.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2004 Texas Instruments, Jian Zhang <[email protected]>
* Copyright © 2004 Micron Technology Inc.
* Copyright © 2004 David Brownell
*/
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand-ecc-sw-bch.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/omap-dma.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_data/elm.h>
#include <linux/omap-gpmc.h>
#include <linux/platform_data/mtd-nand-omap2.h>
#define DRIVER_NAME "omap2-nand"
#define OMAP_NAND_TIMEOUT_MS 5000
#define NAND_Ecc_P1e (1 << 0)
#define NAND_Ecc_P2e (1 << 1)
#define NAND_Ecc_P4e (1 << 2)
#define NAND_Ecc_P8e (1 << 3)
#define NAND_Ecc_P16e (1 << 4)
#define NAND_Ecc_P32e (1 << 5)
#define NAND_Ecc_P64e (1 << 6)
#define NAND_Ecc_P128e (1 << 7)
#define NAND_Ecc_P256e (1 << 8)
#define NAND_Ecc_P512e (1 << 9)
#define NAND_Ecc_P1024e (1 << 10)
#define NAND_Ecc_P2048e (1 << 11)
#define NAND_Ecc_P1o (1 << 16)
#define NAND_Ecc_P2o (1 << 17)
#define NAND_Ecc_P4o (1 << 18)
#define NAND_Ecc_P8o (1 << 19)
#define NAND_Ecc_P16o (1 << 20)
#define NAND_Ecc_P32o (1 << 21)
#define NAND_Ecc_P64o (1 << 22)
#define NAND_Ecc_P128o (1 << 23)
#define NAND_Ecc_P256o (1 << 24)
#define NAND_Ecc_P512o (1 << 25)
#define NAND_Ecc_P1024o (1 << 26)
#define NAND_Ecc_P2048o (1 << 27)
#define TF(value) (value ? 1 : 0)
#define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
#define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
#define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
#define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
#define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
#define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
#define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
#define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)
#define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
#define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
#define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
#define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
#define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
#define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
#define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
#define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)
#define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
#define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
#define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
#define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
#define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
#define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
#define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
#define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)
#define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
#define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
#define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
#define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
#define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
#define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
#define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
#define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)
#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
#define PREFETCH_CONFIG1_CS_SHIFT 24
#define ECC_CONFIG_CS_SHIFT 1
#define CS_MASK 0x7
#define ENABLE_PREFETCH (0x1 << 7)
#define DMA_MPU_MODE_SHIFT 2
#define ECCSIZE0_SHIFT 12
#define ECCSIZE1_SHIFT 22
#define ECC1RESULTSIZE 0x1
#define ECCCLEAR 0x100
#define ECC1 0x1
#define PREFETCH_FIFOTHRESHOLD_MAX 0x40
#define PREFETCH_FIFOTHRESHOLD(val) ((val) << 8)
#define PREFETCH_STATUS_COUNT(val) (val & 0x00003fff)
#define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F)
#define STATUS_BUFF_EMPTY 0x00000001
#define SECTOR_BYTES 512
/* 4 bit padding to make byte aligned, 56 = 52 + 4 */
#define BCH4_BIT_PAD 4
/* GPMC ecc engine settings for read */
#define BCH_WRAPMODE_1 1 /* BCH wrap mode 1 */
#define BCH8R_ECC_SIZE0 0x1a /* ecc_size0 = 26 */
#define BCH8R_ECC_SIZE1 0x2 /* ecc_size1 = 2 */
#define BCH4R_ECC_SIZE0 0xd /* ecc_size0 = 13 */
#define BCH4R_ECC_SIZE1 0x3 /* ecc_size1 = 3 */
/* GPMC ecc engine settings for write */
#define BCH_WRAPMODE_6 6 /* BCH wrap mode 6 */
#define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */
#define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */
#define BBM_LEN 2
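/*
* Hardware-calculated BCH ECC of a fully erased (all 0xFF) sector, per
* strength. omap_elm_correct_data() compares the calculated ECC against
* these signatures to recognize erased pages.
*/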
static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 0xd0, 0x61, 0xb3, 0xf1, 0x55,
0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78,
0x48, 0x76, 0xa9, 0x3b, 0x97, 0xd1, 0x7a, 0x93,
0x07, 0x0e};
static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
0xac, 0x6b, 0xff, 0x99, 0x7b};
static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
struct omap_nand_info {
struct nand_chip nand;
struct platform_device *pdev;
int gpmc_cs;
bool dev_ready;
enum nand_io xfer_type;
enum omap_ecc ecc_opt;
struct device_node *elm_of_node;
unsigned long phys_base;
struct completion comp;
struct dma_chan *dma;
int gpmc_irq_fifo;
int gpmc_irq_count;
enum {
OMAP_NAND_IO_READ = 0, /* read */
OMAP_NAND_IO_WRITE, /* write */
} iomode;
u_char *buf;
int buf_len;
/* Interface to GPMC */
void __iomem *fifo;
struct gpmc_nand_regs reg;
struct gpmc_nand_ops *ops;
bool flash_bbt;
/* fields specific for BCHx_HW ECC scheme */
struct device *elm_dev;
/* NAND ready gpio */
struct gpio_desc *ready_gpiod;
unsigned int neccpg;
unsigned int nsteps_per_eccpg;
unsigned int eccpg_size;
unsigned int eccpg_bytes;
void (*data_in)(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit);
void (*data_out)(struct nand_chip *chip,
const void *buf, unsigned int len,
bool force_8bit);
};
static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
{
return container_of(mtd_to_nand(mtd), struct omap_nand_info, nand);
}
static void omap_nand_data_in(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit);
static void omap_nand_data_out(struct nand_chip *chip,
const void *buf, unsigned int len,
bool force_8bit);
/**
* omap_prefetch_enable - configures and starts prefetch transfer
* @cs: cs (chip select) number
* @fifo_th: fifo threshold to be used for read/ write
* @dma_mode: dma mode enable (1) or disable (0)
* @u32_count: number of bytes to be transferred
* @is_write: prefetch read(0) or write post(1) mode
* @info: NAND device structure containing platform data
*/
static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
unsigned int u32_count, int is_write, struct omap_nand_info *info)
{
u32 val;
if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
return -1;
if (readl(info->reg.gpmc_prefetch_control))
return -EBUSY;
/* Set the amount of bytes to be prefetched */
writel(u32_count, info->reg.gpmc_prefetch_config2);
/* Set dma/mpu mode, select the prefetch read / post write mode and
* enable the engine. Also program which CS has requested the transfer.
*/
val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
(dma_mode << DMA_MPU_MODE_SHIFT) | (is_write & 0x1));
writel(val, info->reg.gpmc_prefetch_config1);
/* Start the prefetch engine */
writel(0x1, info->reg.gpmc_prefetch_control);
return 0;
}
/*
* omap_prefetch_reset - disables and stops the prefetch engine
*/
static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
{
u32 config1;
/* check if the same module/cs is trying to reset */
config1 = readl(info->reg.gpmc_prefetch_config1);
if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
return -EINVAL;
/* Stop the PFPW engine */
writel(0x0, info->reg.gpmc_prefetch_control);
/* Reset/disable the PFPW engine */
writel(0x0, info->reg.gpmc_prefetch_config1);
return 0;
}
/**
* omap_nand_data_in_pref - NAND data in using prefetch engine
*/
static void omap_nand_data_in_pref(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
uint32_t r_count = 0;
int ret = 0;
u32 *p = (u32 *)buf;
unsigned int pref_len;
if (force_8bit) {
omap_nand_data_in(chip, buf, len, force_8bit);
return;
}
/* read 32-bit words using prefetch and remaining bytes normally */
/* configure and start prefetch transfer */
pref_len = len - (len & 3);
ret = omap_prefetch_enable(info->gpmc_cs,
PREFETCH_FIFOTHRESHOLD_MAX, 0x0, pref_len, 0x0, info);
if (ret) {
/* prefetch engine is busy, use CPU copy method */
omap_nand_data_in(chip, buf, len, false);
} else {
do {
r_count = readl(info->reg.gpmc_prefetch_status);
r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
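/* the FIFO level is reported in bytes; convert it to 32-bit words */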
r_count = r_count >> 2;
ioread32_rep(info->fifo, p, r_count);
p += r_count;
pref_len -= r_count << 2;
} while (pref_len);
/* disable and stop the Prefetch engine */
omap_prefetch_reset(info->gpmc_cs, info);
/* fetch any remaining bytes */
if (len & 3)
omap_nand_data_in(chip, p, len & 3, false);
}
}
/**
* omap_nand_data_out_pref - NAND data out using Write Posting engine
*/
static void omap_nand_data_out_pref(struct nand_chip *chip,
const void *buf, unsigned int len,
bool force_8bit)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
uint32_t w_count = 0;
int i = 0, ret = 0;
u16 *p = (u16 *)buf;
unsigned long tim, limit;
u32 val;
if (force_8bit) {
omap_nand_data_out(chip, buf, len, force_8bit);
return;
}
/* take care of subpage writes */
if (len % 2 != 0) {
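/* write the odd leading byte by itself so the rest is 16-bit aligned */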
writeb(*(u8 *)buf, info->fifo);
p = (u16 *)(buf + 1);
len--;
}
/* configure and start prefetch transfer */
ret = omap_prefetch_enable(info->gpmc_cs,
PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
if (ret) {
/* write posting engine is busy, use CPU copy method */
omap_nand_data_out(chip, buf, len, false);
} else {
while (len) {
w_count = readl(info->reg.gpmc_prefetch_status);
w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
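/* the FIFO count is in bytes; convert it to 16-bit words */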
w_count = w_count >> 1;
for (i = 0; (i < w_count) && len; i++, len -= 2)
iowrite16(*p++, info->fifo);
}
/* wait for the data to be flushed out before resetting the prefetch engine */
tim = 0;
limit = (loops_per_jiffy *
msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
do {
cpu_relax();
val = readl(info->reg.gpmc_prefetch_status);
val = PREFETCH_STATUS_COUNT(val);
} while (val && (tim++ < limit));
/* disable and stop the PFPW engine */
omap_prefetch_reset(info->gpmc_cs, info);
}
}
/*
* omap_nand_dma_callback: callback on the completion of dma transfer
* @data: pointer to completion data structure
*/
static void omap_nand_dma_callback(void *data)
{
complete((struct completion *) data);
}
/*
* omap_nand_dma_transfer: configure and start dma transfer
* @chip: nand chip structure
* @addr: virtual address in RAM of source/destination
* @len: number of data bytes to be transferred
* @is_write: flag for read/write operation
*/
static inline int omap_nand_dma_transfer(struct nand_chip *chip,
const void *addr, unsigned int len,
int is_write)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
struct dma_async_tx_descriptor *tx;
enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
DMA_FROM_DEVICE;
struct scatterlist sg;
unsigned long tim, limit;
unsigned n;
int ret;
u32 val;
if (!virt_addr_valid(addr))
goto out_copy;
sg_init_one(&sg, addr, len);
n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
if (n == 0) {
dev_err(&info->pdev->dev,
"Couldn't DMA map a %d byte buffer\n", len);
goto out_copy;
}
tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx)
goto out_copy_unmap;
tx->callback = omap_nand_dma_callback;
tx->callback_param = &info->comp;
dmaengine_submit(tx);
init_completion(&info->comp);
/* setup and start DMA using dma_addr */
dma_async_issue_pending(info->dma);
/* configure and start prefetch transfer */
ret = omap_prefetch_enable(info->gpmc_cs,
PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
if (ret)
/* PFPW engine is busy, use cpu copy method */
goto out_copy_unmap;
wait_for_completion(&info->comp);
tim = 0;
limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
do {
cpu_relax();
val = readl(info->reg.gpmc_prefetch_status);
val = PREFETCH_STATUS_COUNT(val);
} while (val && (tim++ < limit));
/* disable and stop the PFPW engine */
omap_prefetch_reset(info->gpmc_cs, info);
dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
return 0;
out_copy_unmap:
dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
out_copy:
is_write == 0 ? omap_nand_data_in(chip, (void *)addr, len, false)
: omap_nand_data_out(chip, addr, len, false);
return 0;
}
/**
* omap_nand_data_in_dma_pref - NAND data in using DMA and Prefetch
*/
static void omap_nand_data_in_dma_pref(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (force_8bit) {
omap_nand_data_in(chip, buf, len, force_8bit);
return;
}
if (len <= mtd->oobsize)
omap_nand_data_in_pref(chip, buf, len, false);
else
/* start transfer in DMA mode */
omap_nand_dma_transfer(chip, buf, len, 0x0);
}
/**
* omap_nand_data_out_dma_pref - NAND data out using DMA and write posting
*/
static void omap_nand_data_out_dma_pref(struct nand_chip *chip,
const void *buf, unsigned int len,
bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (force_8bit) {
omap_nand_data_out(chip, buf, len, force_8bit);
return;
}
if (len <= mtd->oobsize)
omap_nand_data_out_pref(chip, buf, len, false);
else
/* start transfer in DMA mode */
omap_nand_dma_transfer(chip, buf, len, 0x1);
}
/*
* omap_nand_irq - GPMC irq handler
* @this_irq: gpmc irq number
* @dev: omap_nand_info structure pointer is passed here
*/
static irqreturn_t omap_nand_irq(int this_irq, void *dev)
{
struct omap_nand_info *info = (struct omap_nand_info *) dev;
u32 bytes;
bytes = readl(info->reg.gpmc_prefetch_status);
bytes = PREFETCH_STATUS_FIFO_CNT(bytes);
bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
if (this_irq == info->gpmc_irq_count)
goto done;
if (info->buf_len && (info->buf_len < bytes))
bytes = info->buf_len;
else if (!info->buf_len)
bytes = 0;
iowrite32_rep(info->fifo, (u32 *)info->buf,
bytes >> 2);
info->buf = info->buf + bytes;
info->buf_len -= bytes;
} else {
ioread32_rep(info->fifo, (u32 *)info->buf,
bytes >> 2);
info->buf = info->buf + bytes;
if (this_irq == info->gpmc_irq_count)
goto done;
}
return IRQ_HANDLED;
done:
complete(&info->comp);
disable_irq_nosync(info->gpmc_irq_fifo);
disable_irq_nosync(info->gpmc_irq_count);
return IRQ_HANDLED;
}
/*
* omap_nand_data_in_irq_pref - NAND data in using Prefetch and IRQ
*/
static void omap_nand_data_in_irq_pref(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
struct mtd_info *mtd = nand_to_mtd(&info->nand);
int ret = 0;
if (len <= mtd->oobsize || force_8bit) {
omap_nand_data_in(chip, buf, len, force_8bit);
return;
}
info->iomode = OMAP_NAND_IO_READ;
info->buf = buf;
init_completion(&info->comp);
/* configure and start prefetch transfer */
ret = omap_prefetch_enable(info->gpmc_cs,
PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
omap_nand_data_in(chip, buf, len, false);
return;
}
info->buf_len = len;
enable_irq(info->gpmc_irq_count);
enable_irq(info->gpmc_irq_fifo);
/* waiting for read to complete */
wait_for_completion(&info->comp);
/* disable and stop the PFPW engine */
omap_prefetch_reset(info->gpmc_cs, info);
return;
}
/*
* omap_nand_data_out_irq_pref - NAND data out using write posting and IRQ
*/
static void omap_nand_data_out_irq_pref(struct nand_chip *chip,
const void *buf, unsigned int len,
bool force_8bit)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
struct mtd_info *mtd = nand_to_mtd(&info->nand);
int ret = 0;
unsigned long tim, limit;
u32 val;
if (len <= mtd->oobsize || force_8bit) {
omap_nand_data_out(chip, buf, len, force_8bit);
return;
}
info->iomode = OMAP_NAND_IO_WRITE;
info->buf = (u_char *) buf;
init_completion(&info->comp);
/* configure and start prefetch transfer : size=24 */
ret = omap_prefetch_enable(info->gpmc_cs,
(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
omap_nand_data_out(chip, buf, len, false);
return;
}
info->buf_len = len;
enable_irq(info->gpmc_irq_count);
enable_irq(info->gpmc_irq_fifo);
/* waiting for write to complete */
wait_for_completion(&info->comp);
/* wait for the data to be flushed out before resetting the prefetch engine */
tim = 0;
limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
do {
val = readl(info->reg.gpmc_prefetch_status);
val = PREFETCH_STATUS_COUNT(val);
cpu_relax();
} while (val && (tim++ < limit));
/* disable and stop the PFPW engine */
omap_prefetch_reset(info->gpmc_cs, info);
return;
}
/**
* gen_true_ecc - This function will generate true ECC value
* @ecc_buf: buffer to store ecc code
*
* This generated true ECC value can be used when correcting
* data read from NAND flash memory core
*/
static void gen_true_ecc(u8 *ecc_buf)
{
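/*
* Repack the three hardware ECC bytes into a single 32-bit value laid
* out for the P*e()/P*o() extraction macros used below.
*/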
u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}
/**
* omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
* @ecc_data1: ecc code from nand spare area
* @ecc_data2: ecc code from hardware register obtained from hardware ecc
* @page_data: page data
*
 * This function compares two ECCs and indicates whether there is an error.
 * If the error can be corrected, it is corrected in the buffer.
 * If there is no error, %0 is returned. If there is an error but it
 * was corrected, %1 is returned. Otherwise, %-EBADMSG is returned.
*/
static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
u8 *ecc_data2, /* read from register */
u8 *page_data)
{
uint i;
u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
u8 ecc_bit[24];
u8 ecc_sum = 0;
u8 find_bit = 0;
uint find_byte = 0;
int isEccFF;
isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
gen_true_ecc(ecc_data1);
gen_true_ecc(ecc_data2);
for (i = 0; i <= 2; i++) {
*(ecc_data1 + i) = ~(*(ecc_data1 + i));
*(ecc_data2 + i) = ~(*(ecc_data2 + i));
}
for (i = 0; i < 8; i++) {
tmp0_bit[i] = *ecc_data1 % 2;
*ecc_data1 = *ecc_data1 / 2;
}
for (i = 0; i < 8; i++) {
tmp1_bit[i] = *(ecc_data1 + 1) % 2;
*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
}
for (i = 0; i < 8; i++) {
tmp2_bit[i] = *(ecc_data1 + 2) % 2;
*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
}
for (i = 0; i < 8; i++) {
comp0_bit[i] = *ecc_data2 % 2;
*ecc_data2 = *ecc_data2 / 2;
}
for (i = 0; i < 8; i++) {
comp1_bit[i] = *(ecc_data2 + 1) % 2;
*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
}
for (i = 0; i < 8; i++) {
comp2_bit[i] = *(ecc_data2 + 2) % 2;
*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
}
for (i = 0; i < 6; i++)
ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
for (i = 0; i < 8; i++)
ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
for (i = 0; i < 8; i++)
ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
for (i = 0; i < 24; i++)
ecc_sum += ecc_bit[i];
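/*
* There are 12 even/odd parity pairs (24 syndrome bits). A single
* correctable data-bit error flips exactly one bit of every pair,
* giving a sum of 12; other non-zero sums indicate an error in the
* ECC bytes themselves or a multi-bit error.
*/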
switch (ecc_sum) {
case 0:
/* Not reached because this function is not called if
* ECC values are equal
*/
return 0;
case 1:
/* Uncorrectable error */
pr_debug("ECC UNCORRECTED_ERROR 1\n");
return -EBADMSG;
case 11:
/* Uncorrectable error */
pr_debug("ECC UNCORRECTED_ERROR B\n");
return -EBADMSG;
case 12:
/* Correctable error */
find_byte = (ecc_bit[23] << 8) +
(ecc_bit[21] << 7) +
(ecc_bit[19] << 6) +
(ecc_bit[17] << 5) +
(ecc_bit[15] << 4) +
(ecc_bit[13] << 3) +
(ecc_bit[11] << 2) +
(ecc_bit[9] << 1) +
ecc_bit[7];
find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
pr_debug("Correcting single bit ECC error at offset: "
"%d, bit: %d\n", find_byte, find_bit);
page_data[find_byte] ^= (1 << find_bit);
return 1;
default:
if (isEccFF) {
if (ecc_data2[0] == 0 &&
ecc_data2[1] == 0 &&
ecc_data2[2] == 0)
return 0;
}
pr_debug("UNCORRECTED_ERROR default\n");
return -EBADMSG;
}
}
/**
* omap_correct_data - Compares the ECC read with HW generated ECC
* @chip: NAND chip object
* @dat: page data
* @read_ecc: ecc read from nand flash
* @calc_ecc: ecc read from HW ECC registers
*
 * Compares the ECC read from the NAND spare area with the ECC register
 * values and, if they mismatch, calls omap_compare_ecc() for error
 * detection and correction. If there are no errors, %0 is returned. If
 * there were errors and all of them were corrected, the number of
 * corrected errors is returned. If an uncorrectable error exists,
 * %-EBADMSG is returned.
*/
static int omap_correct_data(struct nand_chip *chip, u_char *dat,
u_char *read_ecc, u_char *calc_ecc)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
int blockCnt = 0, i = 0, ret = 0;
int stat = 0;
/* Ex NAND_ECC_HW12_2048 */
if (info->nand.ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST &&
info->nand.ecc.size == 2048)
blockCnt = 4;
else
blockCnt = 1;
for (i = 0; i < blockCnt; i++) {
if (memcmp(read_ecc, calc_ecc, 3) != 0) {
ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
if (ret < 0)
return ret;
/* keep track of the number of corrected errors */
stat += ret;
}
read_ecc += 3;
calc_ecc += 3;
dat += 512;
}
return stat;
}
/**
* omap_calculate_ecc - Generate non-inverted ECC bytes.
* @chip: NAND chip object
* @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer
*
 * Using non-inverted ECC can be considered ugly since writing a blank
 * page, i.e. padding, will clear the ECC bytes. This is no problem as long
 * as nobody is trying to write data on the seemingly unused page. Reading
 * an erased page will produce an ECC mismatch between generated and read
 * ECC bytes that has to be dealt with separately.
*/
static int omap_calculate_ecc(struct nand_chip *chip, const u_char *dat,
u_char *ecc_code)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
u32 val;
val = readl(info->reg.gpmc_ecc_config);
if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
return -EINVAL;
/* read ecc result */
val = readl(info->reg.gpmc_ecc1_result);
*ecc_code++ = val; /* P128e, ..., P1e */
*ecc_code++ = val >> 16; /* P128o, ..., P1o */
/* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
*ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
return 0;
}
/**
* omap_enable_hwecc - This function enables the hardware ecc functionality
* @chip: NAND chip object
* @mode: Read/Write mode
*/
static void omap_enable_hwecc(struct nand_chip *chip, int mode)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
u32 val;
/* clear ecc and enable bits */
val = ECCCLEAR | ECC1;
writel(val, info->reg.gpmc_ecc_control);
/* program ecc and result sizes */
val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
ECC1RESULTSIZE);
writel(val, info->reg.gpmc_ecc_size_config);
switch (mode) {
case NAND_ECC_READ:
case NAND_ECC_WRITE:
writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
break;
case NAND_ECC_READSYN:
writel(ECCCLEAR, info->reg.gpmc_ecc_control);
break;
default:
dev_info(&info->pdev->dev,
"error: unrecognized Mode[%d]!\n", mode);
break;
}
/* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
writel(val, info->reg.gpmc_ecc_config);
}
/**
* omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
* @chip: NAND chip object
* @mode: Read/Write mode
*
* When using BCH with SW correction (i.e. no ELM), sector size is set
* to 512 bytes and we use BCH_WRAPMODE_6 wrapping mode
* for both reading and writing with:
* eccsize0 = 0 (no additional protected byte in spare area)
* eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
*/
static void __maybe_unused omap_enable_hwecc_bch(struct nand_chip *chip,
int mode)
{
unsigned int bch_type;
unsigned int dev_width, nsectors;
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
enum omap_ecc ecc_opt = info->ecc_opt;
u32 val, wr_mode;
unsigned int ecc_size1, ecc_size0;
/* GPMC configurations for calculating ECC */
switch (ecc_opt) {
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
bch_type = 0;
nsectors = 1;
wr_mode = BCH_WRAPMODE_6;
ecc_size0 = BCH_ECC_SIZE0;
ecc_size1 = BCH_ECC_SIZE1;
break;
case OMAP_ECC_BCH4_CODE_HW:
bch_type = 0;
nsectors = chip->ecc.steps;
if (mode == NAND_ECC_READ) {
wr_mode = BCH_WRAPMODE_1;
ecc_size0 = BCH4R_ECC_SIZE0;
ecc_size1 = BCH4R_ECC_SIZE1;
} else {
wr_mode = BCH_WRAPMODE_6;
ecc_size0 = BCH_ECC_SIZE0;
ecc_size1 = BCH_ECC_SIZE1;
}
break;
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
bch_type = 1;
nsectors = 1;
wr_mode = BCH_WRAPMODE_6;
ecc_size0 = BCH_ECC_SIZE0;
ecc_size1 = BCH_ECC_SIZE1;
break;
case OMAP_ECC_BCH8_CODE_HW:
bch_type = 1;
nsectors = chip->ecc.steps;
if (mode == NAND_ECC_READ) {
wr_mode = BCH_WRAPMODE_1;
ecc_size0 = BCH8R_ECC_SIZE0;
ecc_size1 = BCH8R_ECC_SIZE1;
} else {
wr_mode = BCH_WRAPMODE_6;
ecc_size0 = BCH_ECC_SIZE0;
ecc_size1 = BCH_ECC_SIZE1;
}
break;
case OMAP_ECC_BCH16_CODE_HW:
bch_type = 0x2;
nsectors = chip->ecc.steps;
if (mode == NAND_ECC_READ) {
wr_mode = 0x01;
ecc_size0 = 52; /* ECC bits in nibbles per sector */
ecc_size1 = 0; /* non-ECC bits in nibbles per sector */
} else {
wr_mode = 0x01;
ecc_size0 = 0; /* extra bits in nibbles per sector */
ecc_size1 = 52; /* OOB bits in nibbles per sector */
}
break;
default:
return;
}
writel(ECC1, info->reg.gpmc_ecc_control);
/* Configure ecc size for BCH */
val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
writel(val, info->reg.gpmc_ecc_size_config);
dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
/* BCH configuration */
val = ((1 << 16) | /* enable BCH */
(bch_type << 12) | /* BCH4/BCH8/BCH16 */
(wr_mode << 8) | /* wrap mode */
(dev_width << 7) | /* bus width */
(((nsectors-1) & 0x7) << 4) | /* number of sectors */
(info->gpmc_cs << 1) | /* ECC CS */
(0x1)); /* enable ECC */
writel(val, info->reg.gpmc_ecc_config);
/* Clear ecc and enable bits */
writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
}
static u8 bch4_polynomial[] = {0x28, 0x13, 0xcc, 0x39, 0x96, 0xac, 0x7f};
static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
0x97, 0x79, 0xe5, 0x24, 0xb5};
/**
* _omap_calculate_ecc_bch - Generate ECC bytes for one sector
* @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed
* @ecc_calc: The ecc_code buffer
* @i: The sector number (for a multi sector page)
*
 * Supports calculation of BCH4/8/16 ECC vectors for one sector
 * within a page. The sector number is passed in @i.
*/
static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
const u_char *dat, u_char *ecc_calc, int i)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
int eccbytes = info->nand.ecc.bytes;
struct gpmc_nand_regs *gpmc_regs = &info->reg;
u8 *ecc_code;
unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
u32 val;
int j;
ecc_code = ecc_calc;
switch (info->ecc_opt) {
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
case OMAP_ECC_BCH8_CODE_HW:
bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
*ecc_code++ = (bch_val4 & 0xFF);
*ecc_code++ = ((bch_val3 >> 24) & 0xFF);
*ecc_code++ = ((bch_val3 >> 16) & 0xFF);
*ecc_code++ = ((bch_val3 >> 8) & 0xFF);
*ecc_code++ = (bch_val3 & 0xFF);
*ecc_code++ = ((bch_val2 >> 24) & 0xFF);
*ecc_code++ = ((bch_val2 >> 16) & 0xFF);
*ecc_code++ = ((bch_val2 >> 8) & 0xFF);
*ecc_code++ = (bch_val2 & 0xFF);
*ecc_code++ = ((bch_val1 >> 24) & 0xFF);
*ecc_code++ = ((bch_val1 >> 16) & 0xFF);
*ecc_code++ = ((bch_val1 >> 8) & 0xFF);
*ecc_code++ = (bch_val1 & 0xFF);
break;
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
case OMAP_ECC_BCH4_CODE_HW:
bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
*ecc_code++ = ((bch_val2 >> 12) & 0xFF);
*ecc_code++ = ((bch_val2 >> 4) & 0xFF);
*ecc_code++ = ((bch_val2 & 0xF) << 4) |
((bch_val1 >> 28) & 0xF);
*ecc_code++ = ((bch_val1 >> 20) & 0xFF);
*ecc_code++ = ((bch_val1 >> 12) & 0xFF);
*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
*ecc_code++ = ((bch_val1 & 0xF) << 4);
break;
case OMAP_ECC_BCH16_CODE_HW:
val = readl(gpmc_regs->gpmc_bch_result6[i]);
ecc_code[0] = ((val >> 8) & 0xFF);
ecc_code[1] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result5[i]);
ecc_code[2] = ((val >> 24) & 0xFF);
ecc_code[3] = ((val >> 16) & 0xFF);
ecc_code[4] = ((val >> 8) & 0xFF);
ecc_code[5] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result4[i]);
ecc_code[6] = ((val >> 24) & 0xFF);
ecc_code[7] = ((val >> 16) & 0xFF);
ecc_code[8] = ((val >> 8) & 0xFF);
ecc_code[9] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result3[i]);
ecc_code[10] = ((val >> 24) & 0xFF);
ecc_code[11] = ((val >> 16) & 0xFF);
ecc_code[12] = ((val >> 8) & 0xFF);
ecc_code[13] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result2[i]);
ecc_code[14] = ((val >> 24) & 0xFF);
ecc_code[15] = ((val >> 16) & 0xFF);
ecc_code[16] = ((val >> 8) & 0xFF);
ecc_code[17] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result1[i]);
ecc_code[18] = ((val >> 24) & 0xFF);
ecc_code[19] = ((val >> 16) & 0xFF);
ecc_code[20] = ((val >> 8) & 0xFF);
ecc_code[21] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result0[i]);
ecc_code[22] = ((val >> 24) & 0xFF);
ecc_code[23] = ((val >> 16) & 0xFF);
ecc_code[24] = ((val >> 8) & 0xFF);
ecc_code[25] = ((val >> 0) & 0xFF);
break;
default:
return -EINVAL;
}
/* ECC scheme specific syndrome customizations */
switch (info->ecc_opt) {
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
/* Add constant polynomial to remainder, so that
* ECC of blank pages results in 0x0 on reading back
*/
for (j = 0; j < eccbytes; j++)
ecc_calc[j] ^= bch4_polynomial[j];
break;
case OMAP_ECC_BCH4_CODE_HW:
/* Set 8th ECC byte as 0x0 for ROM compatibility */
ecc_calc[eccbytes - 1] = 0x0;
break;
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
/* Add constant polynomial to remainder, so that
* ECC of blank pages results in 0x0 on reading back
*/
for (j = 0; j < eccbytes; j++)
ecc_calc[j] ^= bch8_polynomial[j];
break;
case OMAP_ECC_BCH8_CODE_HW:
/* Set 14th ECC byte as 0x0 for ROM compatibility */
ecc_calc[eccbytes - 1] = 0x0;
break;
case OMAP_ECC_BCH16_CODE_HW:
break;
default:
return -EINVAL;
}
return 0;
}
/**
* omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
* @chip: NAND chip object
* @dat: The pointer to data on which ecc is computed
* @ecc_calc: Buffer storing the calculated ECC bytes
*
 * Supports calculation of BCH4/8/16 ECC vectors for one sector. This is used
 * when SW-based correction is required, as the ECC is then needed for one
 * sector at a time.
*/
static int omap_calculate_ecc_bch_sw(struct nand_chip *chip,
const u_char *dat, u_char *ecc_calc)
{
return _omap_calculate_ecc_bch(nand_to_mtd(chip), dat, ecc_calc, 0);
}
/**
* omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
* @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed
* @ecc_calc: Buffer storing the calculated ECC bytes
*
 * Supports calculation of BCH4/8/16 ECC vectors for the entire page in one go.
*/
static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
const u_char *dat, u_char *ecc_calc)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
int eccbytes = info->nand.ecc.bytes;
unsigned long nsectors;
int i, ret;
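/*
* Number of sectors currently programmed into the ECC config
* register (set up by omap_enable_hwecc_bch()).
*/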
nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
for (i = 0; i < nsectors; i++) {
ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
if (ret)
return ret;
ecc_calc += eccbytes;
}
return 0;
}
/**
* erased_sector_bitflips - count bit flips
* @data: data sector buffer
* @oob: oob buffer
* @info: omap_nand_info
*
 * Check whether the number of bit flips in an erased page falls below the
 * correctable level. If so, report the page as erased with correctable bit
 * flips; otherwise report the page as uncorrectable.
*/
static int erased_sector_bitflips(u_char *data, u_char *oob,
struct omap_nand_info *info)
{
int flip_bits = 0, i;
for (i = 0; i < info->nand.ecc.size; i++) {
flip_bits += hweight8(~data[i]);
if (flip_bits > info->nand.ecc.strength)
return 0;
}
for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
flip_bits += hweight8(~oob[i]);
if (flip_bits > info->nand.ecc.strength)
return 0;
}
/*
* The bit flips are within the correctable level.
* Fill the data area with 0xFF.
*/
if (flip_bits) {
memset(data, 0xFF, info->nand.ecc.size);
memset(oob, 0xFF, info->nand.ecc.bytes);
}
return flip_bits;
}
/**
* omap_elm_correct_data - corrects page data area in case error reported
* @chip: NAND chip object
* @data: page data
* @read_ecc: ecc read from nand flash
* @calc_ecc: ecc read from HW ECC registers
*
 * The calculated ECC vector is reported as zero for error-free pages.
 * For a non-zero ECC vector, first filter out erased pages, then
 * process the data via the ELM to locate the bit-flips.
*/
static int omap_elm_correct_data(struct nand_chip *chip, u_char *data,
u_char *read_ecc, u_char *calc_ecc)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
struct nand_ecc_ctrl *ecc = &info->nand.ecc;
int eccsteps = info->nsteps_per_eccpg;
int i , j, stat = 0;
int eccflag, actual_eccbytes;
struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
u_char *ecc_vec = calc_ecc;
u_char *spare_ecc = read_ecc;
u_char *erased_ecc_vec;
u_char *buf;
int bitflip_count;
bool is_error_reported = false;
u32 bit_pos, byte_pos, error_max, pos;
int err;
switch (info->ecc_opt) {
case OMAP_ECC_BCH4_CODE_HW:
/* omit 7th ECC byte reserved for ROM code compatibility */
actual_eccbytes = ecc->bytes - 1;
erased_ecc_vec = bch4_vector;
break;
case OMAP_ECC_BCH8_CODE_HW:
/* omit 14th ECC byte reserved for ROM code compatibility */
actual_eccbytes = ecc->bytes - 1;
erased_ecc_vec = bch8_vector;
break;
case OMAP_ECC_BCH16_CODE_HW:
actual_eccbytes = ecc->bytes;
erased_ecc_vec = bch16_vector;
break;
default:
dev_err(&info->pdev->dev, "invalid driver configuration\n");
return -EINVAL;
}
/* Initialize elm error vector to zero */
memset(err_vec, 0, sizeof(err_vec));
for (i = 0; i < eccsteps ; i++) {
eccflag = 0; /* initialize eccflag */
/*
* Check whether any error is reported: a non-zero
* ECC vector indicates an error in this sector.
*/
for (j = 0; j < actual_eccbytes; j++) {
if (calc_ecc[j] != 0) {
eccflag = 1; /* non zero ecc, error present */
break;
}
}
if (eccflag == 1) {
if (memcmp(calc_ecc, erased_ecc_vec,
actual_eccbytes) == 0) {
/*
* calc_ecc[] matches pattern for ECC(all 0xff)
* so this is definitely an erased-page
*/
} else {
buf = &data[info->nand.ecc.size * i];
/*
* count number of 0-bits in read_buf.
* This check can be removed once a similar
* check is introduced in generic NAND driver
*/
bitflip_count = erased_sector_bitflips(
buf, read_ecc, info);
if (bitflip_count) {
/*
* number of 0-bits within ECC limits
* So this may be an erased-page
*/
stat += bitflip_count;
} else {
/*
* Too many 0-bits. It may be a
* - programmed-page, OR
* - erased-page with many bit-flips
* So this page requires check by ELM
*/
err_vec[i].error_reported = true;
is_error_reported = true;
}
}
}
/* Update the ecc vector */
calc_ecc += ecc->bytes;
read_ecc += ecc->bytes;
}
/* Check if any error reported */
if (!is_error_reported)
return stat;
/* Decode BCH error using ELM module */
elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
err = 0;
for (i = 0; i < eccsteps; i++) {
if (err_vec[i].error_uncorrectable) {
dev_err(&info->pdev->dev,
"uncorrectable bit-flips found\n");
err = -EBADMSG;
} else if (err_vec[i].error_reported) {
for (j = 0; j < err_vec[i].error_count; j++) {
switch (info->ecc_opt) {
case OMAP_ECC_BCH4_CODE_HW:
/* Add 4 bits to take care of padding */
pos = err_vec[i].error_loc[j] +
BCH4_BIT_PAD;
break;
case OMAP_ECC_BCH8_CODE_HW:
case OMAP_ECC_BCH16_CODE_HW:
pos = err_vec[i].error_loc[j];
break;
default:
return -EINVAL;
}
error_max = (ecc->size + actual_eccbytes) * 8;
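/*
* The ELM reports the error location as a bit offset counted from
* the end of the data + ECC stream, hence the mirrored byte index
* computed below.
*/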
/* Calculate bit position of error */
bit_pos = pos % 8;
/* Calculate byte position of error */
byte_pos = (error_max - pos - 1) / 8;
if (pos < error_max) {
if (byte_pos < 512) {
pr_debug("bitflip@dat[%d]=%x\n",
byte_pos, data[byte_pos]);
data[byte_pos] ^= 1 << bit_pos;
} else {
pr_debug("bitflip@oob[%d]=%x\n",
(byte_pos - 512),
spare_ecc[byte_pos - 512]);
spare_ecc[byte_pos - 512] ^=
1 << bit_pos;
}
} else {
dev_err(&info->pdev->dev,
"invalid bit-flip @ %d:%d\n",
byte_pos, bit_pos);
err = -EBADMSG;
}
}
}
/* Update number of correctable errors */
stat = max_t(unsigned int, stat, err_vec[i].error_count);
/* Update page data with sector size */
data += ecc->size;
spare_ecc += ecc->bytes;
}
return (err) ? err : stat;
}
/**
* omap_write_page_bch - BCH ecc based write page function for entire page
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
* @page: page
*
 * Custom write page method evolved to support multi-sector writing in one shot
*/
static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct omap_nand_info *info = mtd_to_omap(mtd);
uint8_t *ecc_calc = chip->ecc.calc_buf;
unsigned int eccpg;
int ret;
ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
if (ret)
return ret;
for (eccpg = 0; eccpg < info->neccpg; eccpg++) {
/* Enable GPMC ecc engine */
chip->ecc.hwctl(chip, NAND_ECC_WRITE);
/* Write data */
info->data_out(chip, buf + (eccpg * info->eccpg_size),
info->eccpg_size, false);
/* Update ecc vector from GPMC result registers */
ret = omap_calculate_ecc_bch_multi(mtd,
buf + (eccpg * info->eccpg_size),
ecc_calc);
if (ret)
return ret;
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc,
chip->oob_poi,
eccpg * info->eccpg_bytes,
info->eccpg_bytes);
if (ret)
return ret;
}
/* Write ecc vector to OOB area */
info->data_out(chip, chip->oob_poi, mtd->oobsize, false);
return nand_prog_page_end_op(chip);
}
/**
* omap_write_subpage_bch - BCH hardware ECC based subpage write
* @chip: nand chip info structure
* @offset: column address of subpage within the page
* @data_len: data length
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
*
* OMAP optimized subpage write method.
*/
static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
u32 data_len, const u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct omap_nand_info *info = mtd_to_omap(mtd);
u8 *ecc_calc = chip->ecc.calc_buf;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
u32 start_step = offset / ecc_size;
u32 end_step = (offset + data_len - 1) / ecc_size;
unsigned int eccpg;
int step, ret = 0;
/*
* Write the entire page in one go, which is optimal since the
* ECC is calculated by hardware.
* ECC is calculated for all subpages, but we keep
* only the ones we want.
*/
ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
if (ret)
return ret;
for (eccpg = 0; eccpg < info->neccpg; eccpg++) {
/* Enable GPMC ECC engine */
chip->ecc.hwctl(chip, NAND_ECC_WRITE);
/* Write data */
info->data_out(chip, buf + (eccpg * info->eccpg_size),
info->eccpg_size, false);
for (step = 0; step < info->nsteps_per_eccpg; step++) {
unsigned int base_step = eccpg * info->nsteps_per_eccpg;
const u8 *bufoffs = buf + (eccpg * info->eccpg_size);
/* Mask ECC of un-touched subpages with 0xFFs */
if ((step + base_step) < start_step ||
(step + base_step) > end_step)
memset(ecc_calc + (step * ecc_bytes), 0xff,
ecc_bytes);
else
ret = _omap_calculate_ecc_bch(mtd,
bufoffs + (step * ecc_size),
ecc_calc + (step * ecc_bytes),
step);
if (ret)
return ret;
}
/*
* Copy the calculated ECC for the whole page including the
* masked values (0xFF) corresponding to unwritten subpages.
*/
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi,
eccpg * info->eccpg_bytes,
info->eccpg_bytes);
if (ret)
return ret;
}
/* write OOB buffer to NAND device */
info->data_out(chip, chip->oob_poi, mtd->oobsize, false);
return nand_prog_page_end_op(chip);
}
/**
* omap_read_page_bch - BCH ecc based page read function for entire page
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
 * For the BCH ECC scheme, the GPMC is used for syndrome calculation and
 * the ELM module for error correction.
 * Custom method evolved to support ELM error correction and multi-sector
 * reading. The page data area is read along with the OOB data with the
 * ECC engine enabled; the ECC vector is updated after the OOB data is read.
 * For error-free pages the ECC vector is reported as zero.
*/
static int omap_read_page_bch(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct omap_nand_info *info = mtd_to_omap(mtd);
uint8_t *ecc_calc = chip->ecc.calc_buf;
uint8_t *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0, eccpg;
int stat, ret;
ret = nand_read_page_op(chip, page, 0, NULL, 0);
if (ret)
return ret;
for (eccpg = 0; eccpg < info->neccpg; eccpg++) {
/* Enable GPMC ecc engine */
chip->ecc.hwctl(chip, NAND_ECC_READ);
/* Read data */
ret = nand_change_read_column_op(chip, eccpg * info->eccpg_size,
buf + (eccpg * info->eccpg_size),
info->eccpg_size, false);
if (ret)
return ret;
/* Read oob bytes */
ret = nand_change_read_column_op(chip,
mtd->writesize + BBM_LEN +
(eccpg * info->eccpg_bytes),
chip->oob_poi + BBM_LEN +
(eccpg * info->eccpg_bytes),
info->eccpg_bytes, false);
if (ret)
return ret;
/* Calculate ecc bytes */
ret = omap_calculate_ecc_bch_multi(mtd,
buf + (eccpg * info->eccpg_size),
ecc_calc);
if (ret)
return ret;
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code,
chip->oob_poi,
eccpg * info->eccpg_bytes,
info->eccpg_bytes);
if (ret)
return ret;
stat = chip->ecc.correct(chip,
buf + (eccpg * info->eccpg_size),
ecc_code, ecc_calc);
if (stat < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += stat;
max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
}
return max_bitflips;
}
/**
* is_elm_present - checks for presence of ELM module by scanning DT nodes
* @info: NAND device structure containing platform data
* @elm_node: ELM's DT node
*/
static bool is_elm_present(struct omap_nand_info *info,
struct device_node *elm_node)
{
struct platform_device *pdev;
/* check whether elm-id is passed via DT */
if (!elm_node) {
dev_err(&info->pdev->dev, "ELM devicetree node not found\n");
return false;
}
pdev = of_find_device_by_node(elm_node);
/* check whether ELM device is registered */
if (!pdev) {
dev_err(&info->pdev->dev, "ELM device not found\n");
return false;
}
/* ELM module available, now configure it */
info->elm_dev = &pdev->dev;
return true;
}
static bool omap2_nand_ecc_check(struct omap_nand_info *info)
{
bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
switch (info->ecc_opt) {
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
ecc_needs_omap_bch = false;
ecc_needs_bch = true;
ecc_needs_elm = false;
break;
case OMAP_ECC_BCH4_CODE_HW:
case OMAP_ECC_BCH8_CODE_HW:
case OMAP_ECC_BCH16_CODE_HW:
ecc_needs_omap_bch = true;
ecc_needs_bch = false;
ecc_needs_elm = true;
break;
default:
ecc_needs_omap_bch = false;
ecc_needs_bch = false;
ecc_needs_elm = false;
break;
}
if (ecc_needs_bch && !IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
dev_err(&info->pdev->dev,
"CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
return false;
}
if (ecc_needs_omap_bch && !IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)) {
dev_err(&info->pdev->dev,
"CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
return false;
}
if (ecc_needs_elm && !is_elm_present(info, info->elm_of_node)) {
dev_err(&info->pdev->dev, "ELM not available\n");
return false;
}
return true;
}
static const char * const nand_xfer_types[] = {
[NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
[NAND_OMAP_POLLED] = "polled",
[NAND_OMAP_PREFETCH_DMA] = "prefetch-dma",
[NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq",
};
static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info)
{
struct device_node *child = dev->of_node;
int i;
const char *s;
u32 cs;
if (of_property_read_u32(child, "reg", &cs) < 0) {
dev_err(dev, "reg not found in DT\n");
return -EINVAL;
}
info->gpmc_cs = cs;
/* detect availability of ELM module. Won't be present pre-OMAP4 */
info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
if (!info->elm_of_node) {
info->elm_of_node = of_parse_phandle(child, "elm_id", 0);
if (!info->elm_of_node)
dev_dbg(dev, "ti,elm-id not in DT\n");
}
/* select ecc-scheme for NAND */
if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
dev_err(dev, "ti,nand-ecc-opt not found\n");
return -EINVAL;
}
if (!strcmp(s, "sw")) {
info->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
} else if (!strcmp(s, "ham1") ||
!strcmp(s, "hw") || !strcmp(s, "hw-romcode")) {
info->ecc_opt = OMAP_ECC_HAM1_CODE_HW;
} else if (!strcmp(s, "bch4")) {
if (info->elm_of_node)
info->ecc_opt = OMAP_ECC_BCH4_CODE_HW;
else
info->ecc_opt = OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
} else if (!strcmp(s, "bch8")) {
if (info->elm_of_node)
info->ecc_opt = OMAP_ECC_BCH8_CODE_HW;
else
info->ecc_opt = OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
} else if (!strcmp(s, "bch16")) {
info->ecc_opt = OMAP_ECC_BCH16_CODE_HW;
} else {
dev_err(dev, "unrecognized value for ti,nand-ecc-opt\n");
return -EINVAL;
}
/* select data transfer mode */
if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) {
for (i = 0; i < ARRAY_SIZE(nand_xfer_types); i++) {
if (!strcasecmp(s, nand_xfer_types[i])) {
info->xfer_type = i;
return 0;
}
}
dev_err(dev, "unrecognized value for ti,nand-xfer-type\n");
return -EINVAL;
}
return 0;
}
static int omap_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
struct nand_chip *chip = &info->nand;
int off = BBM_LEN;
if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
!(chip->options & NAND_BUSWIDTH_16))
off = 1;
if (section)
return -ERANGE;
oobregion->offset = off;
oobregion->length = chip->ecc.total;
return 0;
}
static int omap_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
struct nand_chip *chip = &info->nand;
int off = BBM_LEN;
if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
!(chip->options & NAND_BUSWIDTH_16))
off = 1;
if (section)
return -ERANGE;
off += chip->ecc.total;
if (off >= mtd->oobsize)
return -ERANGE;
oobregion->offset = off;
oobregion->length = mtd->oobsize - off;
return 0;
}
static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
.ecc = omap_ooblayout_ecc,
.free = omap_ooblayout_free,
};
static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
unsigned int nsteps = nanddev_get_ecc_nsteps(nand);
unsigned int ecc_bytes = nanddev_get_ecc_bytes_per_step(nand);
int off = BBM_LEN;
if (section >= nsteps)
return -ERANGE;
/*
* When SW correction is employed, one OMAP specific marker byte is
* reserved after each ECC step.
*/
oobregion->offset = off + (section * (ecc_bytes + 1));
oobregion->length = ecc_bytes;
return 0;
}
static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
unsigned int nsteps = nanddev_get_ecc_nsteps(nand);
unsigned int ecc_bytes = nanddev_get_ecc_bytes_per_step(nand);
int off = BBM_LEN;
if (section)
return -ERANGE;
/*
* When SW correction is employed, one OMAP specific marker byte is
* reserved after each ECC step.
*/
off += ((ecc_bytes + 1) * nsteps);
if (off >= mtd->oobsize)
return -ERANGE;
oobregion->offset = off;
oobregion->length = mtd->oobsize - off;
return 0;
}
static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
.ecc = omap_sw_ooblayout_ecc,
.free = omap_sw_ooblayout_free,
};
static int omap_nand_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct omap_nand_info *info = mtd_to_omap(mtd);
struct device *dev = &info->pdev->dev;
int min_oobbytes = BBM_LEN;
int elm_bch_strength = -1;
int oobbytes_per_step;
dma_cap_mask_t mask;
int err;
if (chip->bbt_options & NAND_BBT_USE_FLASH)
chip->bbt_options |= NAND_BBT_NO_OOB;
else
chip->options |= NAND_SKIP_BBTSCAN;
/* Re-populate low-level callbacks based on xfer modes */
switch (info->xfer_type) {
case NAND_OMAP_PREFETCH_POLLED:
info->data_in = omap_nand_data_in_pref;
info->data_out = omap_nand_data_out_pref;
break;
case NAND_OMAP_POLLED:
/* Use nand_base defaults for {read,write}_buf */
break;
case NAND_OMAP_PREFETCH_DMA:
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
info->dma = dma_request_chan(dev->parent, "rxtx");
if (IS_ERR(info->dma)) {
dev_err(dev, "DMA engine request failed\n");
return PTR_ERR(info->dma);
} else {
struct dma_slave_config cfg;
memset(&cfg, 0, sizeof(cfg));
cfg.src_addr = info->phys_base;
cfg.dst_addr = info->phys_base;
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cfg.src_maxburst = 16;
cfg.dst_maxburst = 16;
err = dmaengine_slave_config(info->dma, &cfg);
if (err) {
dev_err(dev,
"DMA engine slave config failed: %d\n",
err);
return err;
}
info->data_in = omap_nand_data_in_dma_pref;
info->data_out = omap_nand_data_out_dma_pref;
}
break;
case NAND_OMAP_PREFETCH_IRQ:
info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
if (info->gpmc_irq_fifo <= 0)
return -ENODEV;
err = devm_request_irq(dev, info->gpmc_irq_fifo,
omap_nand_irq, IRQF_SHARED,
"gpmc-nand-fifo", info);
if (err) {
dev_err(dev, "Requesting IRQ %d, error %d\n",
info->gpmc_irq_fifo, err);
info->gpmc_irq_fifo = 0;
return err;
}
info->gpmc_irq_count = platform_get_irq(info->pdev, 1);
if (info->gpmc_irq_count <= 0)
return -ENODEV;
err = devm_request_irq(dev, info->gpmc_irq_count,
omap_nand_irq, IRQF_SHARED,
"gpmc-nand-count", info);
if (err) {
dev_err(dev, "Requesting IRQ %d, error %d\n",
info->gpmc_irq_count, err);
info->gpmc_irq_count = 0;
return err;
}
info->data_in = omap_nand_data_in_irq_pref;
info->data_out = omap_nand_data_out_irq_pref;
break;
default:
dev_err(dev, "xfer_type %d not supported!\n", info->xfer_type);
return -EINVAL;
}
if (!omap2_nand_ecc_check(info))
return -EINVAL;
/*
* Bail out early to let the NAND_ECC_ENGINE_TYPE_SOFT code create its own
* ooblayout instead of using ours.
*/
if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
/* Populate MTD interface based on ECC scheme */
switch (info->ecc_opt) {
case OMAP_ECC_HAM1_CODE_HW:
dev_info(dev, "nand: using OMAP_ECC_HAM1_CODE_HW\n");
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.bytes = 3;
chip->ecc.size = 512;
chip->ecc.strength = 1;
chip->ecc.calculate = omap_calculate_ecc;
chip->ecc.hwctl = omap_enable_hwecc;
chip->ecc.correct = omap_correct_data;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = chip->ecc.bytes;
if (!(chip->options & NAND_BUSWIDTH_16))
min_oobbytes = 1;
break;
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.size = 512;
chip->ecc.bytes = 7;
chip->ecc.strength = 4;
chip->ecc.hwctl = omap_enable_hwecc_bch;
chip->ecc.correct = rawnand_sw_bch_correct;
chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
oobbytes_per_step = chip->ecc.bytes + 1;
/* Software BCH library is used for locating errors */
err = rawnand_sw_bch_init(chip);
if (err) {
dev_err(dev, "Unable to use BCH library\n");
return err;
}
break;
case OMAP_ECC_BCH4_CODE_HW:
pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.size = 512;
/* 14th bit is kept reserved for ROM-code compatibility */
chip->ecc.bytes = 7 + 1;
chip->ecc.strength = 4;
chip->ecc.hwctl = omap_enable_hwecc_bch;
chip->ecc.correct = omap_elm_correct_data;
chip->ecc.read_page = omap_read_page_bch;
chip->ecc.write_page = omap_write_page_bch;
chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = chip->ecc.bytes;
elm_bch_strength = BCH4_ECC;
break;
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.size = 512;
chip->ecc.bytes = 13;
chip->ecc.strength = 8;
chip->ecc.hwctl = omap_enable_hwecc_bch;
chip->ecc.correct = rawnand_sw_bch_correct;
chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
oobbytes_per_step = chip->ecc.bytes + 1;
/* Software BCH library is used for locating errors */
err = rawnand_sw_bch_init(chip);
if (err) {
dev_err(dev, "unable to use BCH library\n");
return err;
}
break;
case OMAP_ECC_BCH8_CODE_HW:
pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.size = 512;
/* 14th bit is kept reserved for ROM-code compatibility */
chip->ecc.bytes = 13 + 1;
chip->ecc.strength = 8;
chip->ecc.hwctl = omap_enable_hwecc_bch;
chip->ecc.correct = omap_elm_correct_data;
chip->ecc.read_page = omap_read_page_bch;
chip->ecc.write_page = omap_write_page_bch;
chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = chip->ecc.bytes;
elm_bch_strength = BCH8_ECC;
break;
case OMAP_ECC_BCH16_CODE_HW:
pr_info("Using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.size = 512;
chip->ecc.bytes = 26;
chip->ecc.strength = 16;
chip->ecc.hwctl = omap_enable_hwecc_bch;
chip->ecc.correct = omap_elm_correct_data;
chip->ecc.read_page = omap_read_page_bch;
chip->ecc.write_page = omap_write_page_bch;
chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = chip->ecc.bytes;
elm_bch_strength = BCH16_ECC;
break;
default:
dev_err(dev, "Invalid or unsupported ECC scheme\n");
return -EINVAL;
}
if (elm_bch_strength >= 0) {
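/*
* The ELM can decode at most ERROR_VECTOR_MAX sectors per call, so
* split the page into ECC groups of at most that many sectors.
*/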
chip->ecc.steps = mtd->writesize / chip->ecc.size;
info->neccpg = chip->ecc.steps / ERROR_VECTOR_MAX;
if (info->neccpg) {
info->nsteps_per_eccpg = ERROR_VECTOR_MAX;
} else {
info->neccpg = 1;
info->nsteps_per_eccpg = chip->ecc.steps;
}
info->eccpg_size = info->nsteps_per_eccpg * chip->ecc.size;
info->eccpg_bytes = info->nsteps_per_eccpg * chip->ecc.bytes;
err = elm_config(info->elm_dev, elm_bch_strength,
info->nsteps_per_eccpg, chip->ecc.size,
chip->ecc.bytes);
if (err < 0)
return err;
}
/* Check if NAND device's OOB is enough to store ECC signatures */
min_oobbytes += (oobbytes_per_step *
(mtd->writesize / chip->ecc.size));
if (mtd->oobsize < min_oobbytes) {
dev_err(dev,
"Not enough OOB bytes: required = %d, available=%d\n",
min_oobbytes, mtd->oobsize);
return -EINVAL;
}
return 0;
}
static void omap_nand_data_in(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
u32 alignment = ((uintptr_t)buf | len) & 3;
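/* use the widest access both the buffer address and the length are aligned to */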
if (force_8bit || (alignment & 1))
ioread8_rep(info->fifo, buf, len);
else if (alignment & 3)
ioread16_rep(info->fifo, buf, len >> 1);
else
ioread32_rep(info->fifo, buf, len >> 2);
}
static void omap_nand_data_out(struct nand_chip *chip,
const void *buf, unsigned int len,
bool force_8bit)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
u32 alignment = ((uintptr_t)buf | len) & 3;
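/* use the widest access both the buffer address and the length are aligned to */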
if (force_8bit || (alignment & 1))
iowrite8_rep(info->fifo, buf, len);
else if (alignment & 3)
iowrite16_rep(info->fifo, buf, len >> 1);
else
iowrite32_rep(info->fifo, buf, len >> 2);
}
static int omap_nand_exec_instr(struct nand_chip *chip,
const struct nand_op_instr *instr)
{
struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
unsigned int i;
int ret;
switch (instr->type) {
case NAND_OP_CMD_INSTR:
iowrite8(instr->ctx.cmd.opcode,
info->reg.gpmc_nand_command);
break;
case NAND_OP_ADDR_INSTR:
for (i = 0; i < instr->ctx.addr.naddrs; i++) {
iowrite8(instr->ctx.addr.addrs[i],
info->reg.gpmc_nand_address);
}
break;
case NAND_OP_DATA_IN_INSTR:
info->data_in(chip, instr->ctx.data.buf.in,
instr->ctx.data.len,
instr->ctx.data.force_8bit);
break;
case NAND_OP_DATA_OUT_INSTR:
info->data_out(chip, instr->ctx.data.buf.out,
instr->ctx.data.len,
instr->ctx.data.force_8bit);
break;
case NAND_OP_WAITRDY_INSTR:
ret = info->ready_gpiod ?
nand_gpio_waitrdy(chip, info->ready_gpiod, instr->ctx.waitrdy.timeout_ms) :
nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
if (ret)
return ret;
break;
}
if (instr->delay_ns)
ndelay(instr->delay_ns);
return 0;
}
static int omap_nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
unsigned int i;
if (check_only)
return 0;
for (i = 0; i < op->ninstrs; i++) {
int ret;
ret = omap_nand_exec_instr(chip, &op->instrs[i]);
if (ret)
return ret;
}
return 0;
}
static const struct nand_controller_ops omap_nand_controller_ops = {
.attach_chip = omap_nand_attach_chip,
.exec_op = omap_nand_exec_op,
};
/* Shared among all NAND instances to synchronize access to the ECC Engine */
static struct nand_controller omap_gpmc_controller;
static bool omap_gpmc_controller_initialized;
static int omap_nand_probe(struct platform_device *pdev)
{
struct omap_nand_info *info;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
int err;
struct resource *res;
struct device *dev = &pdev->dev;
void __iomem *vaddr;
info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
GFP_KERNEL);
if (!info)
return -ENOMEM;
info->pdev = pdev;
err = omap_get_dt_info(dev, info);
if (err)
return err;
info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
if (!info->ops) {
dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
return -ENODEV;
}
nand_chip = &info->nand;
mtd = nand_to_mtd(nand_chip);
mtd->dev.parent = &pdev->dev;
nand_set_flash_node(nand_chip, dev->of_node);
if (!mtd->name) {
mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"omap2-nand.%d", info->gpmc_cs);
if (!mtd->name) {
dev_err(&pdev->dev, "Failed to set MTD name\n");
return -ENOMEM;
}
}
vaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
info->fifo = vaddr;
info->phys_base = res->start;
if (!omap_gpmc_controller_initialized) {
omap_gpmc_controller.ops = &omap_nand_controller_ops;
nand_controller_init(&omap_gpmc_controller);
omap_gpmc_controller_initialized = true;
}
nand_chip->controller = &omap_gpmc_controller;
info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
GPIOD_IN);
if (IS_ERR(info->ready_gpiod)) {
dev_err(dev, "failed to get ready gpio\n");
return PTR_ERR(info->ready_gpiod);
}
if (info->flash_bbt)
nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
/* default operations */
info->data_in = omap_nand_data_in;
info->data_out = omap_nand_data_out;
err = nand_scan(nand_chip, 1);
if (err)
goto return_error;
err = mtd_device_register(mtd, NULL, 0);
if (err)
goto cleanup_nand;
platform_set_drvdata(pdev, mtd);
return 0;
cleanup_nand:
nand_cleanup(nand_chip);
return_error:
if (!IS_ERR_OR_NULL(info->dma))
dma_release_channel(info->dma);
rawnand_sw_bch_cleanup(nand_chip);
return err;
}
static void omap_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct omap_nand_info *info = mtd_to_omap(mtd);
rawnand_sw_bch_cleanup(nand_chip);
if (info->dma)
dma_release_channel(info->dma);
WARN_ON(mtd_device_unregister(mtd));
nand_cleanup(nand_chip);
}
/* omap_nand_ids defined in linux/platform_data/mtd-nand-omap2.h */
MODULE_DEVICE_TABLE(of, omap_nand_ids);
static struct platform_driver omap_nand_driver = {
.probe = omap_nand_probe,
.remove_new = omap_nand_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = omap_nand_ids,
},
};
module_platform_driver(omap_nand_driver);
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
| linux-master | drivers/mtd/nand/raw/omap2.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright 2008 Sascha Hauer, [email protected]
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/completion.h>
#include <linux/of.h>
#define DRIVER_NAME "mxc_nand"
/* Addresses for NFC registers */
#define NFC_V1_V2_BUF_SIZE (host->regs + 0x00)
#define NFC_V1_V2_BUF_ADDR (host->regs + 0x04)
#define NFC_V1_V2_FLASH_ADDR (host->regs + 0x06)
#define NFC_V1_V2_FLASH_CMD (host->regs + 0x08)
#define NFC_V1_V2_CONFIG (host->regs + 0x0a)
#define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c)
#define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e)
#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10)
#define NFC_V1_V2_WRPROT (host->regs + 0x12)
#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
#define NFC_V21_UNLOCKSTART_BLKADDR0 (host->regs + 0x20)
#define NFC_V21_UNLOCKSTART_BLKADDR1 (host->regs + 0x24)
#define NFC_V21_UNLOCKSTART_BLKADDR2 (host->regs + 0x28)
#define NFC_V21_UNLOCKSTART_BLKADDR3 (host->regs + 0x2c)
#define NFC_V21_UNLOCKEND_BLKADDR0 (host->regs + 0x22)
#define NFC_V21_UNLOCKEND_BLKADDR1 (host->regs + 0x26)
#define NFC_V21_UNLOCKEND_BLKADDR2 (host->regs + 0x2a)
#define NFC_V21_UNLOCKEND_BLKADDR3 (host->regs + 0x2e)
#define NFC_V1_V2_NF_WRPRST (host->regs + 0x18)
#define NFC_V1_V2_CONFIG1 (host->regs + 0x1a)
#define NFC_V1_V2_CONFIG2 (host->regs + 0x1c)
#define NFC_V2_CONFIG1_ECC_MODE_4 (1 << 0)
#define NFC_V1_V2_CONFIG1_SP_EN (1 << 2)
#define NFC_V1_V2_CONFIG1_ECC_EN (1 << 3)
#define NFC_V1_V2_CONFIG1_INT_MSK (1 << 4)
#define NFC_V1_V2_CONFIG1_BIG (1 << 5)
#define NFC_V1_V2_CONFIG1_RST (1 << 6)
#define NFC_V1_V2_CONFIG1_CE (1 << 7)
#define NFC_V2_CONFIG1_ONE_CYCLE (1 << 8)
#define NFC_V2_CONFIG1_PPB(x) (((x) & 0x3) << 9)
#define NFC_V2_CONFIG1_FP_INT (1 << 11)
#define NFC_V1_V2_CONFIG2_INT (1 << 15)
/*
* Operation modes for the NFC. Valid for v1, v2 and v3
* type controllers.
*/
#define NFC_CMD (1 << 0)
#define NFC_ADDR (1 << 1)
#define NFC_INPUT (1 << 2)
#define NFC_OUTPUT (1 << 3)
#define NFC_ID (1 << 4)
#define NFC_STATUS (1 << 5)
#define NFC_V3_FLASH_CMD (host->regs_axi + 0x00)
#define NFC_V3_FLASH_ADDR0 (host->regs_axi + 0x04)
#define NFC_V3_CONFIG1 (host->regs_axi + 0x34)
#define NFC_V3_CONFIG1_SP_EN (1 << 0)
#define NFC_V3_CONFIG1_RBA(x) (((x) & 0x7 ) << 4)
#define NFC_V3_ECC_STATUS_RESULT (host->regs_axi + 0x38)
#define NFC_V3_LAUNCH (host->regs_axi + 0x40)
#define NFC_V3_WRPROT (host->regs_ip + 0x0)
#define NFC_V3_WRPROT_LOCK_TIGHT (1 << 0)
#define NFC_V3_WRPROT_LOCK (1 << 1)
#define NFC_V3_WRPROT_UNLOCK (1 << 2)
#define NFC_V3_WRPROT_BLS_UNLOCK (2 << 6)
#define NFC_V3_WRPROT_UNLOCK_BLK_ADD0 (host->regs_ip + 0x04)
#define NFC_V3_CONFIG2 (host->regs_ip + 0x24)
#define NFC_V3_CONFIG2_PS_512 (0 << 0)
#define NFC_V3_CONFIG2_PS_2048 (1 << 0)
#define NFC_V3_CONFIG2_PS_4096 (2 << 0)
#define NFC_V3_CONFIG2_ONE_CYCLE (1 << 2)
#define NFC_V3_CONFIG2_ECC_EN (1 << 3)
#define NFC_V3_CONFIG2_2CMD_PHASES (1 << 4)
#define NFC_V3_CONFIG2_NUM_ADDR_PHASE0 (1 << 5)
#define NFC_V3_CONFIG2_ECC_MODE_8 (1 << 6)
#define NFC_V3_CONFIG2_PPB(x, shift) (((x) & 0x3) << shift)
#define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x) (((x) & 0x3) << 12)
#define NFC_V3_CONFIG2_INT_MSK (1 << 15)
#define NFC_V3_CONFIG2_ST_CMD(x) (((x) & 0xff) << 24)
#define NFC_V3_CONFIG2_SPAS(x) (((x) & 0xff) << 16)
#define NFC_V3_CONFIG3 (host->regs_ip + 0x28)
#define NFC_V3_CONFIG3_ADD_OP(x) (((x) & 0x3) << 0)
#define NFC_V3_CONFIG3_FW8 (1 << 3)
#define NFC_V3_CONFIG3_SBB(x) (((x) & 0x7) << 8)
#define NFC_V3_CONFIG3_NUM_OF_DEVICES(x) (((x) & 0x7) << 12)
#define NFC_V3_CONFIG3_RBB_MODE (1 << 15)
#define NFC_V3_CONFIG3_NO_SDMA (1 << 20)
#define NFC_V3_IPC (host->regs_ip + 0x2C)
#define NFC_V3_IPC_CREQ (1 << 0)
#define NFC_V3_IPC_INT (1 << 31)
#define NFC_V3_DELAY_LINE (host->regs_ip + 0x34)
struct mxc_nand_host;
struct mxc_nand_devtype_data {
void (*preset)(struct mtd_info *);
int (*read_page)(struct nand_chip *chip, void *buf, void *oob, bool ecc,
int page);
void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
void (*send_page)(struct mtd_info *, unsigned int);
void (*send_read_id)(struct mxc_nand_host *);
uint16_t (*get_dev_status)(struct mxc_nand_host *);
int (*check_int)(struct mxc_nand_host *);
void (*irq_control)(struct mxc_nand_host *, int);
u32 (*get_ecc_status)(struct mxc_nand_host *);
const struct mtd_ooblayout_ops *ooblayout;
void (*select_chip)(struct nand_chip *chip, int cs);
int (*setup_interface)(struct nand_chip *chip, int csline,
const struct nand_interface_config *conf);
void (*enable_hwecc)(struct nand_chip *chip, bool enable);
/*
* On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
* (CONFIG1:INT_MSK is set). To handle this the driver uses
* enable_irq/disable_irq_nosync instead of CONFIG1:INT_MSK
*/
int irqpending_quirk;
int needs_ip;
size_t regs_offset;
size_t spare0_offset;
size_t axi_offset;
int spare_len;
int eccbytes;
int eccsize;
int ppb_shift;
};
struct mxc_nand_host {
struct nand_chip nand;
struct device *dev;
void __iomem *spare0;
void __iomem *main_area0;
void __iomem *base;
void __iomem *regs;
void __iomem *regs_axi;
void __iomem *regs_ip;
int status_request;
struct clk *clk;
int clk_act;
int irq;
int eccsize;
int used_oobsize;
int active_cs;
struct completion op_completion;
uint8_t *data_buf;
unsigned int buf_start;
const struct mxc_nand_devtype_data *devtype_data;
};
static const char * const part_probes[] = {
"cmdlinepart", "RedBoot", "ofpart", NULL };
static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
{
int i;
u32 *t = trg;
const __iomem u32 *s = src;
for (i = 0; i < (size >> 2); i++)
*t++ = __raw_readl(s++);
}
static void memcpy16_fromio(void *trg, const void __iomem *src, size_t size)
{
int i;
u16 *t = trg;
const __iomem u16 *s = src;
/* We assume that src (IO) is always 32bit aligned */
if (PTR_ALIGN(trg, 4) == trg && IS_ALIGNED(size, 4)) {
memcpy32_fromio(trg, src, size);
return;
}
for (i = 0; i < (size >> 1); i++)
*t++ = __raw_readw(s++);
}
static inline void memcpy32_toio(void __iomem *trg, const void *src, int size)
{
/* __iowrite32_copy() takes the count in 32-bit words, so divide the byte size by 4 */
__iowrite32_copy(trg, src, size / 4);
}
static void memcpy16_toio(void __iomem *trg, const void *src, int size)
{
int i;
__iomem u16 *t = trg;
const u16 *s = src;
/* We assume that trg (IO) is always 32bit aligned */
if (PTR_ALIGN(src, 4) == src && IS_ALIGNED(size, 4)) {
memcpy32_toio(trg, src, size);
return;
}
for (i = 0; i < (size >> 1); i++)
__raw_writew(*s++, t++);
}
/*
* The controller splits a page into data chunks of 512 bytes + partial oob.
* There are writesize / 512 such chunks, the size of the partial oob parts is
* oobsize / #chunks rounded down to a multiple of 2. The last oob chunk then
* contains additionally the byte lost by rounding (if any).
* This function handles the needed shuffling between host->data_buf (which
* holds a page in natural order, i.e. writesize bytes data + oobsize bytes
* spare) and the NFC buffer.
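*
* For example, a 2048-byte page with 218 used OOB bytes gives 4 chunks and
* oob_chunk_size = (218 / 4) & ~1 = 54, so chunks 0-2 carry 54 bytes each and
* the last chunk carries the remaining 218 - 3 * 54 = 56 bytes.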
*/
static void copy_spare(struct mtd_info *mtd, bool bfrom, void *buf)
{
struct nand_chip *this = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(this);
u16 i, oob_chunk_size;
u16 num_chunks = mtd->writesize / 512;
u8 *d = buf;
u8 __iomem *s = host->spare0;
u16 sparebuf_size = host->devtype_data->spare_len;
/* size of oob chunk for all but possibly the last one */
oob_chunk_size = (host->used_oobsize / num_chunks) & ~1;
if (bfrom) {
for (i = 0; i < num_chunks - 1; i++)
memcpy16_fromio(d + i * oob_chunk_size,
s + i * sparebuf_size,
oob_chunk_size);
/* the last chunk */
memcpy16_fromio(d + i * oob_chunk_size,
s + i * sparebuf_size,
host->used_oobsize - i * oob_chunk_size);
} else {
for (i = 0; i < num_chunks - 1; i++)
memcpy16_toio(&s[i * sparebuf_size],
&d[i * oob_chunk_size],
oob_chunk_size);
/* the last chunk */
memcpy16_toio(&s[i * sparebuf_size],
&d[i * oob_chunk_size],
host->used_oobsize - i * oob_chunk_size);
}
}
/*
 * The MXC NANDFC can only perform full page+spare or spare-only read/write.
 * When the upper layers perform a read/write buf operation, the saved column
 * address is used to index into the full page. So usually this function is
 * called with column == 0 (unless no column cycle is needed, which is
 * indicated by column == -1).
 */
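/*
* For example, reading page N of a 2 KiB-page device of 256 MiB or larger
* issues two column cycles (0x00, 0x00) followed by three row cycles
* (N & 0xff, (N >> 8) & 0xff, (N >> 16) & 0xff).
*/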
static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
/* Write out column address, if necessary */
if (column != -1) {
host->devtype_data->send_addr(host, column & 0xff,
page_addr == -1);
if (mtd->writesize > 512)
/* another col addr cycle for 2k page */
host->devtype_data->send_addr(host,
(column >> 8) & 0xff,
false);
}
/* Write out page address, if necessary */
if (page_addr != -1) {
/* paddr_0 - p_addr_7 */
host->devtype_data->send_addr(host, (page_addr & 0xff), false);
if (mtd->writesize > 512) {
if (mtd->size >= 0x10000000) {
/* paddr_8 - paddr_15 */
host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff,
false);
host->devtype_data->send_addr(host,
(page_addr >> 16) & 0xff,
true);
} else
/* paddr_8 - paddr_15 */
host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff, true);
} else {
if (nand_chip->options & NAND_ROW_ADDR_3) {
/* paddr_8 - paddr_15 */
host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff,
false);
host->devtype_data->send_addr(host,
(page_addr >> 16) & 0xff,
true);
} else
/* paddr_8 - paddr_15 */
host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff, true);
}
}
}
static int check_int_v3(struct mxc_nand_host *host)
{
uint32_t tmp;
tmp = readl(NFC_V3_IPC);
if (!(tmp & NFC_V3_IPC_INT))
return 0;
tmp &= ~NFC_V3_IPC_INT;
writel(tmp, NFC_V3_IPC);
return 1;
}
static int check_int_v1_v2(struct mxc_nand_host *host)
{
uint32_t tmp;
tmp = readw(NFC_V1_V2_CONFIG2);
if (!(tmp & NFC_V1_V2_CONFIG2_INT))
return 0;
if (!host->devtype_data->irqpending_quirk)
writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
return 1;
}
static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
{
uint16_t tmp;
tmp = readw(NFC_V1_V2_CONFIG1);
if (activate)
tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK;
else
tmp |= NFC_V1_V2_CONFIG1_INT_MSK;
writew(tmp, NFC_V1_V2_CONFIG1);
}
static void irq_control_v3(struct mxc_nand_host *host, int activate)
{
uint32_t tmp;
tmp = readl(NFC_V3_CONFIG2);
if (activate)
tmp &= ~NFC_V3_CONFIG2_INT_MSK;
else
tmp |= NFC_V3_CONFIG2_INT_MSK;
writel(tmp, NFC_V3_CONFIG2);
}
static void irq_control(struct mxc_nand_host *host, int activate)
{
if (host->devtype_data->irqpending_quirk) {
if (activate)
enable_irq(host->irq);
else
disable_irq_nosync(host->irq);
} else {
host->devtype_data->irq_control(host, activate);
}
}
static u32 get_ecc_status_v1(struct mxc_nand_host *host)
{
return readw(NFC_V1_V2_ECC_STATUS_RESULT);
}
static u32 get_ecc_status_v2(struct mxc_nand_host *host)
{
return readl(NFC_V1_V2_ECC_STATUS_RESULT);
}
static u32 get_ecc_status_v3(struct mxc_nand_host *host)
{
return readl(NFC_V3_ECC_STATUS_RESULT);
}
static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
{
struct mxc_nand_host *host = dev_id;
if (!host->devtype_data->check_int(host))
return IRQ_NONE;
irq_control(host, 0);
complete(&host->op_completion);
return IRQ_HANDLED;
}
/* This function polls the NANDFC to wait for the basic operation to
* complete by checking the INT bit of the CONFIG2 register.
*/
static int wait_op_done(struct mxc_nand_host *host, int useirq)
{
int ret = 0;
/*
* If operation is already complete, don't bother to setup an irq or a
* loop.
*/
if (host->devtype_data->check_int(host))
return 0;
if (useirq) {
unsigned long timeout;
reinit_completion(&host->op_completion);
irq_control(host, 1);
timeout = wait_for_completion_timeout(&host->op_completion, HZ);
if (!timeout && !host->devtype_data->check_int(host)) {
dev_dbg(host->dev, "timeout waiting for irq\n");
ret = -ETIMEDOUT;
}
} else {
int max_retries = 8000;
int done;
do {
udelay(1);
done = host->devtype_data->check_int(host);
if (done)
break;
} while (--max_retries);
if (!done) {
dev_dbg(host->dev, "timeout polling for completion\n");
ret = -ETIMEDOUT;
}
}
WARN_ONCE(ret < 0, "timeout! useirq=%d\n", useirq);
return ret;
}
static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
{
/* fill command */
writel(cmd, NFC_V3_FLASH_CMD);
/* send out command */
writel(NFC_CMD, NFC_V3_LAUNCH);
/* Wait for operation to complete */
wait_op_done(host, useirq);
}
/* This function issues the specified command to the NAND device and
* waits for completion. */
static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
{
dev_dbg(host->dev, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
writew(cmd, NFC_V1_V2_FLASH_CMD);
writew(NFC_CMD, NFC_V1_V2_CONFIG2);
if (host->devtype_data->irqpending_quirk && (cmd == NAND_CMD_RESET)) {
int max_retries = 100;
/* Reset completion is indicated by NFC_CONFIG2 being set to 0 */
while (max_retries-- > 0) {
if (readw(NFC_V1_V2_CONFIG2) == 0) {
break;
}
udelay(1);
}
if (max_retries < 0)
dev_dbg(host->dev, "%s: RESET failed\n", __func__);
} else {
/* Wait for operation to complete */
wait_op_done(host, useirq);
}
}
static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
{
/* fill address */
writel(addr, NFC_V3_FLASH_ADDR0);
/* send out address */
writel(NFC_ADDR, NFC_V3_LAUNCH);
wait_op_done(host, 0);
}
/* This function sends an address (or partial address) to the
* NAND device. The address is used to select the source/destination for
* a NAND command. */
static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
{
dev_dbg(host->dev, "send_addr(host, 0x%x %d)\n", addr, islast);
writew(addr, NFC_V1_V2_FLASH_ADDR);
writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
/* Wait for operation to complete */
wait_op_done(host, islast);
}
static void send_page_v3(struct mtd_info *mtd, unsigned int ops)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
uint32_t tmp;
tmp = readl(NFC_V3_CONFIG1);
tmp &= ~(7 << 4);
writel(tmp, NFC_V3_CONFIG1);
/* transfer data from NFC ram to nand */
writel(ops, NFC_V3_LAUNCH);
wait_op_done(host, false);
}
static void send_page_v2(struct mtd_info *mtd, unsigned int ops)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
/* NANDFC buffer 0 is used for page read/write */
writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
writew(ops, NFC_V1_V2_CONFIG2);
/* Wait for operation to complete */
wait_op_done(host, true);
}
static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
int bufs, i;
if (mtd->writesize > 512)
bufs = 4;
else
bufs = 1;
for (i = 0; i < bufs; i++) {
/* NANDFC buffer 0 is used for page read/write */
writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
writew(ops, NFC_V1_V2_CONFIG2);
/* Wait for operation to complete */
wait_op_done(host, true);
}
}
static void send_read_id_v3(struct mxc_nand_host *host)
{
/* Read ID into main buffer */
writel(NFC_ID, NFC_V3_LAUNCH);
wait_op_done(host, true);
memcpy32_fromio(host->data_buf, host->main_area0, 16);
}
/* Request the NANDFC to perform a read of the NAND device ID. */
static void send_read_id_v1_v2(struct mxc_nand_host *host)
{
/* NANDFC buffer 0 is used for device ID output */
writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
writew(NFC_ID, NFC_V1_V2_CONFIG2);
/* Wait for operation to complete */
wait_op_done(host, true);
memcpy32_fromio(host->data_buf, host->main_area0, 16);
}
static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
{
writew(NFC_STATUS, NFC_V3_LAUNCH);
wait_op_done(host, true);
return readl(NFC_V3_CONFIG1) >> 16;
}
/* This function requests the NANDFC to perform a read of the
* NAND device status and returns the current status. */
static uint16_t get_dev_status_v1_v2(struct mxc_nand_host *host)
{
void __iomem *main_buf = host->main_area0;
uint32_t store;
uint16_t ret;
writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
/*
* The device status is stored in main_area0. To
* prevent corruption of the buffer save the value
* and restore it afterwards.
*/
store = readl(main_buf);
writew(NFC_STATUS, NFC_V1_V2_CONFIG2);
wait_op_done(host, true);
ret = readw(main_buf);
writel(store, main_buf);
return ret;
}
static void mxc_nand_enable_hwecc_v1_v2(struct nand_chip *chip, bool enable)
{
struct mxc_nand_host *host = nand_get_controller_data(chip);
uint16_t config1;
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return;
config1 = readw(NFC_V1_V2_CONFIG1);
if (enable)
config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
else
config1 &= ~NFC_V1_V2_CONFIG1_ECC_EN;
writew(config1, NFC_V1_V2_CONFIG1);
}
static void mxc_nand_enable_hwecc_v3(struct nand_chip *chip, bool enable)
{
struct mxc_nand_host *host = nand_get_controller_data(chip);
uint32_t config2;
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return;
config2 = readl(NFC_V3_CONFIG2);
if (enable)
config2 |= NFC_V3_CONFIG2_ECC_EN;
else
config2 &= ~NFC_V3_CONFIG2_ECC_EN;
writel(config2, NFC_V3_CONFIG2);
}
/* This function is used by the upper layer to check if the device is ready */
static int mxc_nand_dev_ready(struct nand_chip *chip)
{
/*
* NFC handles R/B internally. Therefore, this function
* always returns status as ready.
*/
return 1;
}
static int mxc_nand_read_page_v1(struct nand_chip *chip, void *buf, void *oob,
bool ecc, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
unsigned int bitflips_corrected = 0;
int no_subpages;
int i;
host->devtype_data->enable_hwecc(chip, ecc);
host->devtype_data->send_cmd(host, NAND_CMD_READ0, false);
mxc_do_addr_cycle(mtd, 0, page);
if (mtd->writesize > 512)
host->devtype_data->send_cmd(host, NAND_CMD_READSTART, true);
no_subpages = mtd->writesize >> 9;
for (i = 0; i < no_subpages; i++) {
uint16_t ecc_stats;
/* NANDFC buffer 0 is used for page read/write */
writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
writew(NFC_OUTPUT, NFC_V1_V2_CONFIG2);
/* Wait for operation to complete */
wait_op_done(host, true);
ecc_stats = get_ecc_status_v1(host);
ecc_stats >>= 2;
if (buf && ecc) {
switch (ecc_stats & 0x3) {
case 0:
default:
break;
case 1:
mtd->ecc_stats.corrected++;
bitflips_corrected = 1;
break;
case 2:
mtd->ecc_stats.failed++;
break;
}
}
}
if (buf)
memcpy32_fromio(buf, host->main_area0, mtd->writesize);
if (oob)
copy_spare(mtd, true, oob);
return bitflips_corrected;
}
static int mxc_nand_read_page_v2_v3(struct nand_chip *chip, void *buf,
void *oob, bool ecc, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
unsigned int max_bitflips = 0;
u32 ecc_stat, err;
int no_subpages;
u8 ecc_bit_mask, err_limit;
host->devtype_data->enable_hwecc(chip, ecc);
host->devtype_data->send_cmd(host, NAND_CMD_READ0, false);
mxc_do_addr_cycle(mtd, 0, page);
if (mtd->writesize > 512)
host->devtype_data->send_cmd(host,
NAND_CMD_READSTART, true);
host->devtype_data->send_page(mtd, NFC_OUTPUT);
if (buf)
memcpy32_fromio(buf, host->main_area0, mtd->writesize);
if (oob)
copy_spare(mtd, true, oob);
ecc_bit_mask = (host->eccsize == 4) ? 0x7 : 0xf;
err_limit = (host->eccsize == 4) ? 0x4 : 0x8;
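/*
* The ECC status packs one correction count per 512-byte subpage in
* successive 4-bit fields; a count above err_limit marks the subpage as
* uncorrectable.
*/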
no_subpages = mtd->writesize >> 9;
ecc_stat = host->devtype_data->get_ecc_status(host);
do {
err = ecc_stat & ecc_bit_mask;
if (err > err_limit) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += err;
max_bitflips = max_t(unsigned int, max_bitflips, err);
}
ecc_stat >>= 4;
} while (--no_subpages);
return max_bitflips;
}
static int mxc_nand_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mxc_nand_host *host = nand_get_controller_data(chip);
void *oob_buf;
if (oob_required)
oob_buf = chip->oob_poi;
else
oob_buf = NULL;
return host->devtype_data->read_page(chip, buf, oob_buf, 1, page);
}
static int mxc_nand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mxc_nand_host *host = nand_get_controller_data(chip);
void *oob_buf;
if (oob_required)
oob_buf = chip->oob_poi;
else
oob_buf = NULL;
return host->devtype_data->read_page(chip, buf, oob_buf, 0, page);
}
static int mxc_nand_read_oob(struct nand_chip *chip, int page)
{
struct mxc_nand_host *host = nand_get_controller_data(chip);
return host->devtype_data->read_page(chip, NULL, chip->oob_poi, 0,
page);
}
static int mxc_nand_write_page(struct nand_chip *chip, const uint8_t *buf,
bool ecc, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
host->devtype_data->enable_hwecc(chip, ecc);
host->devtype_data->send_cmd(host, NAND_CMD_SEQIN, false);
mxc_do_addr_cycle(mtd, 0, page);
memcpy32_toio(host->main_area0, buf, mtd->writesize);
copy_spare(mtd, false, chip->oob_poi);
host->devtype_data->send_page(mtd, NFC_INPUT);
host->devtype_data->send_cmd(host, NAND_CMD_PAGEPROG, true);
mxc_do_addr_cycle(mtd, 0, page);
return 0;
}
static int mxc_nand_write_page_ecc(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
return mxc_nand_write_page(chip, buf, true, page);
}
static int mxc_nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
return mxc_nand_write_page(chip, buf, false, page);
}
static int mxc_nand_write_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
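/*
* The main area is written as all 0xff; the OOB data itself is taken from
* chip->oob_poi by copy_spare() inside mxc_nand_write_page().
*/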
memset(host->data_buf, 0xff, mtd->writesize);
return mxc_nand_write_page(chip, host->data_buf, false, page);
}
static u_char mxc_nand_read_byte(struct nand_chip *nand_chip)
{
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
uint8_t ret;
/* Check for status request */
if (host->status_request)
return host->devtype_data->get_dev_status(host) & 0xFF;
if (nand_chip->options & NAND_BUSWIDTH_16) {
/* only take the lower byte of each word */
ret = *(uint16_t *)(host->data_buf + host->buf_start);
host->buf_start += 2;
} else {
ret = *(uint8_t *)(host->data_buf + host->buf_start);
host->buf_start++;
}
dev_dbg(host->dev, "%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
return ret;
}
/* Write data of length len from buffer buf. The data to be
 * written to the NAND Flash is first copied to the RAM buffer. After the data
 * input operation by the NFC, the data is written to the NAND Flash. */
static void mxc_nand_write_buf(struct nand_chip *nand_chip, const u_char *buf,
int len)
{
struct mtd_info *mtd = nand_to_mtd(nand_chip);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
u16 col = host->buf_start;
int n = mtd->oobsize + mtd->writesize - col;
n = min(n, len);
memcpy(host->data_buf + col, buf, n);
host->buf_start += n;
}
/* Read the data buffer from the NAND Flash. To read the data from NAND
* Flash first the data output cycle is initiated by the NFC, which copies
* the data to RAMbuffer. This data of length len is then copied to buffer buf.
*/
static void mxc_nand_read_buf(struct nand_chip *nand_chip, u_char *buf,
int len)
{
struct mtd_info *mtd = nand_to_mtd(nand_chip);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
u16 col = host->buf_start;
int n = mtd->oobsize + mtd->writesize - col;
n = min(n, len);
memcpy(buf, host->data_buf + col, n);
host->buf_start += n;
}
/* This function is used by the upper layer to select and
 * deselect the NAND chip */
static void mxc_nand_select_chip_v1_v3(struct nand_chip *nand_chip, int chip)
{
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
if (chip == -1) {
/* Disable the NFC clock */
if (host->clk_act) {
clk_disable_unprepare(host->clk);
host->clk_act = 0;
}
return;
}
if (!host->clk_act) {
/* Enable the NFC clock */
clk_prepare_enable(host->clk);
host->clk_act = 1;
}
}
static void mxc_nand_select_chip_v2(struct nand_chip *nand_chip, int chip)
{
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
if (chip == -1) {
/* Disable the NFC clock */
if (host->clk_act) {
clk_disable_unprepare(host->clk);
host->clk_act = 0;
}
return;
}
if (!host->clk_act) {
/* Enable the NFC clock */
clk_prepare_enable(host->clk);
host->clk_act = 1;
}
host->active_cs = chip;
writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
}
#define MXC_V1_ECCBYTES 5
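/*
* v1 OOB layout: each 512-byte ECC step owns 16 OOB bytes with the 5 ECC
* bytes at offset 6, e.g. a 2048-byte page keeps its ECC at OOB offsets
* 6-10, 22-26, 38-42 and 54-58.
*/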
static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
if (section >= nand_chip->ecc.steps)
return -ERANGE;
oobregion->offset = (section * 16) + 6;
oobregion->length = MXC_V1_ECCBYTES;
return 0;
}
static int mxc_v1_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
if (section > nand_chip->ecc.steps)
return -ERANGE;
if (!section) {
if (mtd->writesize <= 512) {
oobregion->offset = 0;
oobregion->length = 5;
} else {
oobregion->offset = 2;
oobregion->length = 4;
}
} else {
oobregion->offset = ((section - 1) * 16) + MXC_V1_ECCBYTES + 6;
if (section < nand_chip->ecc.steps)
oobregion->length = (section * 16) + 6 -
oobregion->offset;
else
oobregion->length = mtd->oobsize - oobregion->offset;
}
return 0;
}
static const struct mtd_ooblayout_ops mxc_v1_ooblayout_ops = {
.ecc = mxc_v1_ooblayout_ecc,
.free = mxc_v1_ooblayout_free,
};
static int mxc_v2_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
if (section >= nand_chip->ecc.steps)
return -ERANGE;
oobregion->offset = (section * stepsize) + 7;
oobregion->length = nand_chip->ecc.bytes;
return 0;
}
static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
if (section >= nand_chip->ecc.steps)
return -ERANGE;
if (!section) {
if (mtd->writesize <= 512) {
oobregion->offset = 0;
oobregion->length = 5;
} else {
oobregion->offset = 2;
oobregion->length = 4;
}
} else {
oobregion->offset = section * stepsize;
oobregion->length = 7;
}
return 0;
}
static const struct mtd_ooblayout_ops mxc_v2_ooblayout_ops = {
.ecc = mxc_v2_ooblayout_ecc,
.free = mxc_v2_ooblayout_free,
};
/*
* v2 and v3 type controllers can do 4bit or 8bit ecc depending
* on how much oob the nand chip has. For 8bit ecc we need at least
* 26 bytes of oob data per 512 byte block.
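*
* For example, a 2048-byte page with 64 OOB bytes has 64 * 512 / 2048 = 16
* OOB bytes per 512-byte block (4-bit ECC), while a 4096-byte page with
* 224 OOB bytes has 28 (8-bit ECC).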
*/
static int get_eccsize(struct mtd_info *mtd)
{
int oobbytes_per_512 = 0;
oobbytes_per_512 = mtd->oobsize * 512 / mtd->writesize;
if (oobbytes_per_512 < 26)
return 4;
else
return 8;
}
static void preset_v1(struct mtd_info *mtd)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
uint16_t config1 = 0;
if (nand_chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST &&
mtd->writesize)
config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
if (!host->devtype_data->irqpending_quirk)
config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
host->eccsize = 1;
writew(config1, NFC_V1_V2_CONFIG1);
/* preset operation */
/* Unlock the internal RAM Buffer */
writew(0x2, NFC_V1_V2_CONFIG);
/* Blocks to be unlocked */
writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
/* Unlock Block Command for given address range */
writew(0x4, NFC_V1_V2_WRPROT);
}
static int mxc_nand_v2_setup_interface(struct nand_chip *chip, int csline,
const struct nand_interface_config *conf)
{
struct mxc_nand_host *host = nand_get_controller_data(chip);
int tRC_min_ns, tRC_ps, ret;
unsigned long rate, rate_round;
const struct nand_sdr_timings *timings;
u16 config1;
timings = nand_get_sdr_timings(conf);
if (IS_ERR(timings))
return -ENOTSUPP;
config1 = readw(NFC_V1_V2_CONFIG1);
tRC_min_ns = timings->tRC_min / 1000;
rate = 1000000000 / tRC_min_ns;
/*
* For tRC < 30ns we have to use EDO mode. In this case the controller
* does one access per clock cycle. Otherwise the controller does one
* access in two clock cycles, thus we have to double the rate to the
* controller.
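*
* For example, tRC_min = 25 ns requests a 40 MHz clock in EDO mode, while
* tRC_min = 50 ns requests 2 * 20 MHz = 40 MHz in normal mode.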
*/
if (tRC_min_ns < 30) {
rate_round = clk_round_rate(host->clk, rate);
config1 |= NFC_V2_CONFIG1_ONE_CYCLE;
tRC_ps = 1000000000 / (rate_round / 1000);
} else {
rate *= 2;
rate_round = clk_round_rate(host->clk, rate);
config1 &= ~NFC_V2_CONFIG1_ONE_CYCLE;
tRC_ps = 1000000000 / (rate_round / 1000 / 2);
}
/*
* The timing values compared against are from the i.MX25 Automotive
* datasheet, Table 50. NFC Timing Parameters
*/
if (timings->tCLS_min > tRC_ps - 1000 ||
timings->tCLH_min > tRC_ps - 2000 ||
timings->tCS_min > tRC_ps - 1000 ||
timings->tCH_min > tRC_ps - 2000 ||
timings->tWP_min > tRC_ps - 1500 ||
timings->tALS_min > tRC_ps ||
timings->tALH_min > tRC_ps - 3000 ||
timings->tDS_min > tRC_ps ||
timings->tDH_min > tRC_ps - 5000 ||
timings->tWC_min > 2 * tRC_ps ||
timings->tWH_min > tRC_ps - 2500 ||
timings->tRR_min > 6 * tRC_ps ||
timings->tRP_min > 3 * tRC_ps / 2 ||
timings->tRC_min > 2 * tRC_ps ||
timings->tREH_min > (tRC_ps / 2) - 2500) {
dev_dbg(host->dev, "Timing out of bounds\n");
return -EINVAL;
}
if (csline == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
ret = clk_set_rate(host->clk, rate);
if (ret)
return ret;
writew(config1, NFC_V1_V2_CONFIG1);
dev_dbg(host->dev, "Setting rate to %ldHz, %s mode\n", rate_round,
config1 & NFC_V2_CONFIG1_ONE_CYCLE ? "One cycle (EDO)" :
"normal");
return 0;
}
static void preset_v2(struct mtd_info *mtd)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
uint16_t config1 = 0;
config1 |= NFC_V2_CONFIG1_FP_INT;
if (!host->devtype_data->irqpending_quirk)
config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
if (mtd->writesize) {
uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
if (nand_chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
host->eccsize = get_eccsize(mtd);
if (host->eccsize == 4)
config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
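/* Pages per block are encoded as ffs(ppb) - 6: 32 -> 0, 64 -> 1, 128 -> 2, 256 -> 3 */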
config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6);
} else {
host->eccsize = 1;
}
writew(config1, NFC_V1_V2_CONFIG1);
/* preset operation */
/* spare area size in 16-bit half-words */
writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA);
/* Unlock the internal RAM Buffer */
writew(0x2, NFC_V1_V2_CONFIG);
/* Blocks to be unlocked */
writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
/* Unlock Block Command for given address range */
writew(0x4, NFC_V1_V2_WRPROT);
}
static void preset_v3(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(chip);
uint32_t config2, config3;
int i, addr_phases;
writel(NFC_V3_CONFIG1_RBA(0), NFC_V3_CONFIG1);
writel(NFC_V3_IPC_CREQ, NFC_V3_IPC);
/* Unlock the internal RAM Buffer */
writel(NFC_V3_WRPROT_BLS_UNLOCK | NFC_V3_WRPROT_UNLOCK,
NFC_V3_WRPROT);
/* Blocks to be unlocked */
for (i = 0; i < NAND_MAX_CHIPS; i++)
writel(0xffff << 16, NFC_V3_WRPROT_UNLOCK_BLK_ADD0 + (i << 2));
writel(0, NFC_V3_IPC);
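/* The spare area size (SPAS) is programmed in 16-bit half-words, e.g. a 64-byte OOB gives SPAS(32) */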
config2 = NFC_V3_CONFIG2_ONE_CYCLE |
NFC_V3_CONFIG2_2CMD_PHASES |
NFC_V3_CONFIG2_SPAS(mtd->oobsize >> 1) |
NFC_V3_CONFIG2_ST_CMD(0x70) |
NFC_V3_CONFIG2_INT_MSK |
NFC_V3_CONFIG2_NUM_ADDR_PHASE0;
addr_phases = fls(chip->pagemask) >> 3;
if (mtd->writesize == 2048) {
config2 |= NFC_V3_CONFIG2_PS_2048;
config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases);
} else if (mtd->writesize == 4096) {
config2 |= NFC_V3_CONFIG2_PS_4096;
config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases);
} else {
config2 |= NFC_V3_CONFIG2_PS_512;
config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases - 1);
}
if (mtd->writesize) {
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
config2 |= NFC_V3_CONFIG2_ECC_EN;
config2 |= NFC_V3_CONFIG2_PPB(
ffs(mtd->erasesize / mtd->writesize) - 6,
host->devtype_data->ppb_shift);
host->eccsize = get_eccsize(mtd);
if (host->eccsize == 8)
config2 |= NFC_V3_CONFIG2_ECC_MODE_8;
}
writel(config2, NFC_V3_CONFIG2);
config3 = NFC_V3_CONFIG3_NUM_OF_DEVICES(0) |
NFC_V3_CONFIG3_NO_SDMA |
NFC_V3_CONFIG3_RBB_MODE |
NFC_V3_CONFIG3_SBB(6) | /* Reset default */
NFC_V3_CONFIG3_ADD_OP(0);
if (!(chip->options & NAND_BUSWIDTH_16))
config3 |= NFC_V3_CONFIG3_FW8;
writel(config3, NFC_V3_CONFIG3);
writel(0, NFC_V3_DELAY_LINE);
}
/* Used by the upper layer to issue a command to the NAND Flash for
 * the different operations to be carried out on it */
static void mxc_nand_command(struct nand_chip *nand_chip, unsigned command,
int column, int page_addr)
{
struct mtd_info *mtd = nand_to_mtd(nand_chip);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
command, column, page_addr);
/* Reset command state information */
host->status_request = false;
/* Command pre-processing step */
switch (command) {
case NAND_CMD_RESET:
host->devtype_data->preset(mtd);
host->devtype_data->send_cmd(host, command, false);
break;
case NAND_CMD_STATUS:
host->buf_start = 0;
host->status_request = true;
host->devtype_data->send_cmd(host, command, true);
WARN_ONCE(column != -1 || page_addr != -1,
"Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
command, column, page_addr);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
case NAND_CMD_READID:
host->devtype_data->send_cmd(host, command, true);
mxc_do_addr_cycle(mtd, column, page_addr);
host->devtype_data->send_read_id(host);
host->buf_start = 0;
break;
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
host->devtype_data->send_cmd(host, command, false);
WARN_ONCE(column != -1,
"Unexpected column value (cmd=%u, col=%d)\n",
command, column);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
case NAND_CMD_PARAM:
host->devtype_data->send_cmd(host, command, false);
mxc_do_addr_cycle(mtd, column, page_addr);
host->devtype_data->send_page(mtd, NFC_OUTPUT);
memcpy32_fromio(host->data_buf, host->main_area0, 512);
host->buf_start = 0;
break;
default:
WARN_ONCE(1, "Unimplemented command (cmd=%u)\n",
command);
break;
}
}
static int mxc_nand_set_features(struct nand_chip *chip, int addr,
u8 *subfeature_param)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
int i;
host->buf_start = 0;
for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
chip->legacy.write_byte(chip, subfeature_param[i]);
memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
host->devtype_data->send_cmd(host, NAND_CMD_SET_FEATURES, false);
mxc_do_addr_cycle(mtd, addr, -1);
host->devtype_data->send_page(mtd, NFC_INPUT);
return 0;
}
static int mxc_nand_get_features(struct nand_chip *chip, int addr,
u8 *subfeature_param)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
int i;
host->devtype_data->send_cmd(host, NAND_CMD_GET_FEATURES, false);
mxc_do_addr_cycle(mtd, addr, -1);
host->devtype_data->send_page(mtd, NFC_OUTPUT);
memcpy32_fromio(host->data_buf, host->main_area0, 512);
host->buf_start = 0;
for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
*subfeature_param++ = chip->legacy.read_byte(chip);
return 0;
}
/*
* The generic flash bbt descriptors overlap with our ecc
* hardware, so define some i.MX specific ones.
*/
static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
static struct nand_bbt_descr bbt_main_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
.offs = 0,
.len = 4,
.veroffs = 4,
.maxblocks = 4,
.pattern = bbt_pattern,
};
static struct nand_bbt_descr bbt_mirror_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
.offs = 0,
.len = 4,
.veroffs = 4,
.maxblocks = 4,
.pattern = mirror_pattern,
};
/* v1 + irqpending_quirk: i.MX21 */
static const struct mxc_nand_devtype_data imx21_nand_devtype_data = {
.preset = preset_v1,
.read_page = mxc_nand_read_page_v1,
.send_cmd = send_cmd_v1_v2,
.send_addr = send_addr_v1_v2,
.send_page = send_page_v1,
.send_read_id = send_read_id_v1_v2,
.get_dev_status = get_dev_status_v1_v2,
.check_int = check_int_v1_v2,
.irq_control = irq_control_v1_v2,
.get_ecc_status = get_ecc_status_v1,
.ooblayout = &mxc_v1_ooblayout_ops,
.select_chip = mxc_nand_select_chip_v1_v3,
.enable_hwecc = mxc_nand_enable_hwecc_v1_v2,
.irqpending_quirk = 1,
.needs_ip = 0,
.regs_offset = 0xe00,
.spare0_offset = 0x800,
.spare_len = 16,
.eccbytes = 3,
.eccsize = 1,
};
/* v1 + !irqpending_quirk: i.MX27, i.MX31 */
static const struct mxc_nand_devtype_data imx27_nand_devtype_data = {
.preset = preset_v1,
.read_page = mxc_nand_read_page_v1,
.send_cmd = send_cmd_v1_v2,
.send_addr = send_addr_v1_v2,
.send_page = send_page_v1,
.send_read_id = send_read_id_v1_v2,
.get_dev_status = get_dev_status_v1_v2,
.check_int = check_int_v1_v2,
.irq_control = irq_control_v1_v2,
.get_ecc_status = get_ecc_status_v1,
.ooblayout = &mxc_v1_ooblayout_ops,
.select_chip = mxc_nand_select_chip_v1_v3,
.enable_hwecc = mxc_nand_enable_hwecc_v1_v2,
.irqpending_quirk = 0,
.needs_ip = 0,
.regs_offset = 0xe00,
.spare0_offset = 0x800,
.axi_offset = 0,
.spare_len = 16,
.eccbytes = 3,
.eccsize = 1,
};
/* v21: i.MX25, i.MX35 */
static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
.preset = preset_v2,
.read_page = mxc_nand_read_page_v2_v3,
.send_cmd = send_cmd_v1_v2,
.send_addr = send_addr_v1_v2,
.send_page = send_page_v2,
.send_read_id = send_read_id_v1_v2,
.get_dev_status = get_dev_status_v1_v2,
.check_int = check_int_v1_v2,
.irq_control = irq_control_v1_v2,
.get_ecc_status = get_ecc_status_v2,
.ooblayout = &mxc_v2_ooblayout_ops,
.select_chip = mxc_nand_select_chip_v2,
.setup_interface = mxc_nand_v2_setup_interface,
.enable_hwecc = mxc_nand_enable_hwecc_v1_v2,
.irqpending_quirk = 0,
.needs_ip = 0,
.regs_offset = 0x1e00,
.spare0_offset = 0x1000,
.axi_offset = 0,
.spare_len = 64,
.eccbytes = 9,
.eccsize = 0,
};
/* v3.2a: i.MX51 */
static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
.preset = preset_v3,
.read_page = mxc_nand_read_page_v2_v3,
.send_cmd = send_cmd_v3,
.send_addr = send_addr_v3,
.send_page = send_page_v3,
.send_read_id = send_read_id_v3,
.get_dev_status = get_dev_status_v3,
.check_int = check_int_v3,
.irq_control = irq_control_v3,
.get_ecc_status = get_ecc_status_v3,
.ooblayout = &mxc_v2_ooblayout_ops,
.select_chip = mxc_nand_select_chip_v1_v3,
.enable_hwecc = mxc_nand_enable_hwecc_v3,
.irqpending_quirk = 0,
.needs_ip = 1,
.regs_offset = 0,
.spare0_offset = 0x1000,
.axi_offset = 0x1e00,
.spare_len = 64,
.eccbytes = 0,
.eccsize = 0,
.ppb_shift = 7,
};
/* v3.2b: i.MX53 */
static const struct mxc_nand_devtype_data imx53_nand_devtype_data = {
.preset = preset_v3,
.read_page = mxc_nand_read_page_v2_v3,
.send_cmd = send_cmd_v3,
.send_addr = send_addr_v3,
.send_page = send_page_v3,
.send_read_id = send_read_id_v3,
.get_dev_status = get_dev_status_v3,
.check_int = check_int_v3,
.irq_control = irq_control_v3,
.get_ecc_status = get_ecc_status_v3,
.ooblayout = &mxc_v2_ooblayout_ops,
.select_chip = mxc_nand_select_chip_v1_v3,
.enable_hwecc = mxc_nand_enable_hwecc_v3,
.irqpending_quirk = 0,
.needs_ip = 1,
.regs_offset = 0,
.spare0_offset = 0x1000,
.axi_offset = 0x1e00,
.spare_len = 64,
.eccbytes = 0,
.eccsize = 0,
.ppb_shift = 8,
};
static inline int is_imx21_nfc(struct mxc_nand_host *host)
{
return host->devtype_data == &imx21_nand_devtype_data;
}
static inline int is_imx27_nfc(struct mxc_nand_host *host)
{
return host->devtype_data == &imx27_nand_devtype_data;
}
static inline int is_imx25_nfc(struct mxc_nand_host *host)
{
return host->devtype_data == &imx25_nand_devtype_data;
}
static const struct of_device_id mxcnd_dt_ids[] = {
{ .compatible = "fsl,imx21-nand", .data = &imx21_nand_devtype_data, },
{ .compatible = "fsl,imx27-nand", .data = &imx27_nand_devtype_data, },
{ .compatible = "fsl,imx25-nand", .data = &imx25_nand_devtype_data, },
{ .compatible = "fsl,imx51-nand", .data = &imx51_nand_devtype_data, },
{ .compatible = "fsl,imx53-nand", .data = &imx53_nand_devtype_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxcnd_dt_ids);
static int mxcnd_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
struct device *dev = mtd->dev.parent;
chip->ecc.bytes = host->devtype_data->eccbytes;
host->eccsize = host->devtype_data->eccsize;
chip->ecc.size = 512;
mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
switch (chip->ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_ON_HOST:
chip->ecc.read_page = mxc_nand_read_page;
chip->ecc.read_page_raw = mxc_nand_read_page_raw;
chip->ecc.read_oob = mxc_nand_read_oob;
chip->ecc.write_page = mxc_nand_write_page_ecc;
chip->ecc.write_page_raw = mxc_nand_write_page_raw;
chip->ecc.write_oob = mxc_nand_write_oob;
break;
case NAND_ECC_ENGINE_TYPE_SOFT:
break;
default:
return -EINVAL;
}
if (chip->bbt_options & NAND_BBT_USE_FLASH) {
chip->bbt_td = &bbt_main_descr;
chip->bbt_md = &bbt_mirror_descr;
}
/* Allocate the right size buffer now */
devm_kfree(dev, (void *)host->data_buf);
host->data_buf = devm_kzalloc(dev, mtd->writesize + mtd->oobsize,
GFP_KERNEL);
if (!host->data_buf)
return -ENOMEM;
/* Call preset again, this time with the correct writesize */
host->devtype_data->preset(mtd);
if (!chip->ecc.bytes) {
if (host->eccsize == 8)
chip->ecc.bytes = 18;
else if (host->eccsize == 4)
chip->ecc.bytes = 9;
}
/*
* Experimentation shows that i.MX NFC can only handle up to 218 oob
* bytes. Limit used_oobsize to 218 so as to not confuse copy_spare()
* into copying invalid data to/from the spare IO buffer, as this
* might cause ECC data corruption when doing sub-page write to a
* partially written page.
*/
host->used_oobsize = min(mtd->oobsize, 218U);
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
if (is_imx21_nfc(host) || is_imx27_nfc(host))
chip->ecc.strength = 1;
else
chip->ecc.strength = (host->eccsize == 4) ? 4 : 8;
}
return 0;
}
static int mxcnd_setup_interface(struct nand_chip *chip, int chipnr,
const struct nand_interface_config *conf)
{
struct mxc_nand_host *host = nand_get_controller_data(chip);
return host->devtype_data->setup_interface(chip, chipnr, conf);
}
static const struct nand_controller_ops mxcnd_controller_ops = {
.attach_chip = mxcnd_attach_chip,
.setup_interface = mxcnd_setup_interface,
};
static int mxcnd_probe(struct platform_device *pdev)
{
struct nand_chip *this;
struct mtd_info *mtd;
struct mxc_nand_host *host;
int err = 0;
/* Allocate memory for MTD device structure and private data */
host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host),
GFP_KERNEL);
if (!host)
return -ENOMEM;
/* allocate a temporary buffer for the nand_scan_ident() */
host->data_buf = devm_kzalloc(&pdev->dev, PAGE_SIZE, GFP_KERNEL);
if (!host->data_buf)
return -ENOMEM;
host->dev = &pdev->dev;
/* structures must be linked */
this = &host->nand;
mtd = nand_to_mtd(this);
mtd->dev.parent = &pdev->dev;
mtd->name = DRIVER_NAME;
/* 50 us command delay time */
this->legacy.chip_delay = 5;
nand_set_controller_data(this, host);
nand_set_flash_node(this, pdev->dev.of_node);
this->legacy.dev_ready = mxc_nand_dev_ready;
this->legacy.cmdfunc = mxc_nand_command;
this->legacy.read_byte = mxc_nand_read_byte;
this->legacy.write_buf = mxc_nand_write_buf;
this->legacy.read_buf = mxc_nand_read_buf;
this->legacy.set_features = mxc_nand_set_features;
this->legacy.get_features = mxc_nand_get_features;
host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk))
return PTR_ERR(host->clk);
host->devtype_data = device_get_match_data(&pdev->dev);
if (!host->devtype_data->setup_interface)
this->options |= NAND_KEEP_TIMINGS;
if (host->devtype_data->needs_ip) {
host->regs_ip = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->regs_ip))
return PTR_ERR(host->regs_ip);
host->base = devm_platform_ioremap_resource(pdev, 1);
} else {
host->base = devm_platform_ioremap_resource(pdev, 0);
}
if (IS_ERR(host->base))
return PTR_ERR(host->base);
host->main_area0 = host->base;
if (host->devtype_data->regs_offset)
host->regs = host->base + host->devtype_data->regs_offset;
host->spare0 = host->base + host->devtype_data->spare0_offset;
if (host->devtype_data->axi_offset)
host->regs_axi = host->base + host->devtype_data->axi_offset;
this->legacy.select_chip = host->devtype_data->select_chip;
init_completion(&host->op_completion);
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0)
return host->irq;
/*
* Use host->devtype_data->irq_control() here instead of irq_control()
* because we must not disable_irq_nosync without having requested the
* irq.
*/
host->devtype_data->irq_control(host, 0);
err = devm_request_irq(&pdev->dev, host->irq, mxc_nfc_irq,
0, DRIVER_NAME, host);
if (err)
return err;
err = clk_prepare_enable(host->clk);
if (err)
return err;
host->clk_act = 1;
/*
* Now that we "own" the interrupt make sure the interrupt mask bit is
* cleared on i.MX21. Otherwise we can't read the interrupt status bit
* on this machine.
*/
if (host->devtype_data->irqpending_quirk) {
disable_irq_nosync(host->irq);
host->devtype_data->irq_control(host, 1);
}
/* Scan the NAND device */
this->legacy.dummy_controller.ops = &mxcnd_controller_ops;
err = nand_scan(this, is_imx25_nfc(host) ? 4 : 1);
if (err)
goto escan;
/* Register the partitions */
err = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
if (err)
goto cleanup_nand;
platform_set_drvdata(pdev, host);
return 0;
cleanup_nand:
nand_cleanup(this);
escan:
if (host->clk_act)
clk_disable_unprepare(host->clk);
return err;
}
static void mxcnd_remove(struct platform_device *pdev)
{
struct mxc_nand_host *host = platform_get_drvdata(pdev);
struct nand_chip *chip = &host->nand;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
if (host->clk_act)
clk_disable_unprepare(host->clk);
}
static struct platform_driver mxcnd_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = mxcnd_dt_ids,
},
.probe = mxcnd_probe,
.remove_new = mxcnd_remove,
};
module_platform_driver(mxcnd_driver);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MXC NAND MTD driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/nand/raw/mxc_nand.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004 Richard Purdie
* Copyright (C) 2008 Dmitry Baryshkov
*
* Based on Sharp's NAND driver sharp_sl.c
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sharpsl.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
struct sharpsl_nand {
struct nand_controller controller;
struct nand_chip chip;
void __iomem *io;
};
static inline struct sharpsl_nand *mtd_to_sharpsl(struct mtd_info *mtd)
{
return container_of(mtd_to_nand(mtd), struct sharpsl_nand, chip);
}
/* register offset */
#define ECCLPLB 0x00 /* line parity 7 - 0 bit */
#define ECCLPUB 0x04 /* line parity 15 - 8 bit */
#define ECCCP 0x08 /* column parity 5 - 0 bit */
#define ECCCNTR 0x0C /* ECC byte counter */
#define ECCCLRR 0x10 /* clear ECC */
#define FLASHIO 0x14 /* Flash I/O */
#define FLASHCTL 0x18 /* Flash Control */
/* Flash control bit */
#define FLRYBY (1 << 5)
#define FLCE1 (1 << 4)
#define FLWP (1 << 3)
#define FLALE (1 << 2)
#define FLCLE (1 << 1)
#define FLCE0 (1 << 0)
/*
 * Hardware specific access to control lines
 * ctrl:
 * NAND_NCE: bit 0 -> inverted and copied to bits 0 and 4 (FLCE0/FLCE1)
 * NAND_CLE: bit 1 -> bit 1 (FLCLE)
 * NAND_ALE: bit 2 -> bit 2 (FLALE)
 */
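/*
* For example, ctrl = NAND_NCE | NAND_CLE (0x03) is transformed below into
* bits = 0x02: FLCLE set, FLCE0 and FLCE1 cleared.
*/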
static void sharpsl_nand_hwcontrol(struct nand_chip *chip, int cmd,
unsigned int ctrl)
{
struct sharpsl_nand *sharpsl = mtd_to_sharpsl(nand_to_mtd(chip));
if (ctrl & NAND_CTRL_CHANGE) {
unsigned char bits = ctrl & 0x07;
bits |= (ctrl & 0x01) << 4;
bits ^= 0x11;
writeb((readb(sharpsl->io + FLASHCTL) & ~0x17) | bits, sharpsl->io + FLASHCTL);
}
if (cmd != NAND_CMD_NONE)
writeb(cmd, chip->legacy.IO_ADDR_W);
}
static int sharpsl_nand_dev_ready(struct nand_chip *chip)
{
struct sharpsl_nand *sharpsl = mtd_to_sharpsl(nand_to_mtd(chip));
return !((readb(sharpsl->io + FLASHCTL) & FLRYBY) == 0);
}
static void sharpsl_nand_enable_hwecc(struct nand_chip *chip, int mode)
{
struct sharpsl_nand *sharpsl = mtd_to_sharpsl(nand_to_mtd(chip));
writeb(0, sharpsl->io + ECCCLRR);
}
static int sharpsl_nand_calculate_ecc(struct nand_chip *chip,
const u_char * dat, u_char * ecc_code)
{
struct sharpsl_nand *sharpsl = mtd_to_sharpsl(nand_to_mtd(chip));
ecc_code[0] = ~readb(sharpsl->io + ECCLPUB);
ecc_code[1] = ~readb(sharpsl->io + ECCLPLB);
ecc_code[2] = (~readb(sharpsl->io + ECCCP) << 2) | 0x03;
return readb(sharpsl->io + ECCCNTR) != 0;
}
static int sharpsl_attach_chip(struct nand_chip *chip)
{
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return 0;
chip->ecc.size = 256;
chip->ecc.bytes = 3;
chip->ecc.strength = 1;
chip->ecc.hwctl = sharpsl_nand_enable_hwecc;
chip->ecc.calculate = sharpsl_nand_calculate_ecc;
chip->ecc.correct = rawnand_sw_hamming_correct;
return 0;
}
static const struct nand_controller_ops sharpsl_ops = {
.attach_chip = sharpsl_attach_chip,
};
/*
* Main initialization routine
*/
static int sharpsl_nand_probe(struct platform_device *pdev)
{
struct nand_chip *this;
struct mtd_info *mtd;
struct resource *r;
int err = 0;
struct sharpsl_nand *sharpsl;
struct sharpsl_nand_platform_data *data = dev_get_platdata(&pdev->dev);
if (!data) {
dev_err(&pdev->dev, "no platform data!\n");
return -EINVAL;
}
/* Allocate memory for MTD device structure and private data */
sharpsl = kzalloc(sizeof(struct sharpsl_nand), GFP_KERNEL);
if (!sharpsl)
return -ENOMEM;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
dev_err(&pdev->dev, "no io memory resource defined!\n");
err = -ENODEV;
goto err_get_res;
}
/* map physical address */
sharpsl->io = ioremap(r->start, resource_size(r));
if (!sharpsl->io) {
dev_err(&pdev->dev, "ioremap to access Sharp SL NAND chip failed\n");
err = -EIO;
goto err_ioremap;
}
/* Get pointer to private data */
this = (struct nand_chip *)(&sharpsl->chip);
nand_controller_init(&sharpsl->controller);
sharpsl->controller.ops = &sharpsl_ops;
this->controller = &sharpsl->controller;
/* Link the private data with the MTD structure */
mtd = nand_to_mtd(this);
mtd->dev.parent = &pdev->dev;
mtd_set_ooblayout(mtd, data->ecc_layout);
platform_set_drvdata(pdev, sharpsl);
/*
* PXA initialize
*/
writeb(readb(sharpsl->io + FLASHCTL) | FLWP, sharpsl->io + FLASHCTL);
/* Set address of NAND IO lines */
this->legacy.IO_ADDR_R = sharpsl->io + FLASHIO;
this->legacy.IO_ADDR_W = sharpsl->io + FLASHIO;
/* Set address of hardware control function */
this->legacy.cmd_ctrl = sharpsl_nand_hwcontrol;
this->legacy.dev_ready = sharpsl_nand_dev_ready;
/* 15 us command delay time */
this->legacy.chip_delay = 15;
this->badblock_pattern = data->badblock_pattern;
/* Scan to find existence of the device */
err = nand_scan(this, 1);
if (err)
goto err_scan;
/* Register the partitions */
mtd->name = "sharpsl-nand";
err = mtd_device_parse_register(mtd, data->part_parsers, NULL,
data->partitions, data->nr_partitions);
if (err)
goto err_add;
/* Return happy */
return 0;
err_add:
nand_cleanup(this);
err_scan:
iounmap(sharpsl->io);
err_ioremap:
err_get_res:
kfree(sharpsl);
return err;
}
/*
* Clean up routine
*/
static void sharpsl_nand_remove(struct platform_device *pdev)
{
struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
struct nand_chip *chip = &sharpsl->chip;
int ret;
/* Unregister device */
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
/* Release resources */
nand_cleanup(chip);
iounmap(sharpsl->io);
/* Free the driver's structure */
kfree(sharpsl);
}
static struct platform_driver sharpsl_nand_driver = {
.driver = {
.name = "sharpsl-nand",
},
.probe = sharpsl_nand_probe,
.remove_new = sharpsl_nand_remove,
};
module_platform_driver(sharpsl_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <[email protected]>");
MODULE_DESCRIPTION("Device specific logic for NAND flash on Sharp SL-C7xx Series");
| linux-master | drivers/mtd/nand/raw/sharpsl.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Hisilicon NAND Flash controller driver
*
* Copyright © 2012-2014 HiSilicon Technologies Co., Ltd.
* http://www.hisilicon.com
*
* Author: Zhou Wang <[email protected]>
* The initial developer of the original code is Zhiyong Cai
* <[email protected]>
*/
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/sizes.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/rawnand.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#define HINFC504_MAX_CHIP (4)
#define HINFC504_W_LATCH (5)
#define HINFC504_R_LATCH (7)
#define HINFC504_RW_LATCH (3)
#define HINFC504_NFC_TIMEOUT (2 * HZ)
#define HINFC504_NFC_PM_TIMEOUT (1 * HZ)
#define HINFC504_NFC_DMA_TIMEOUT (5 * HZ)
#define HINFC504_CHIP_DELAY (25)
#define HINFC504_REG_BASE_ADDRESS_LEN (0x100)
#define HINFC504_BUFFER_BASE_ADDRESS_LEN (2048 + 128)
#define HINFC504_ADDR_CYCLE_MASK 0x4
#define HINFC504_CON 0x00
#define HINFC504_CON_OP_MODE_NORMAL BIT(0)
#define HINFC504_CON_PAGEISZE_SHIFT (1)
#define HINFC504_CON_PAGESIZE_MASK (0x07)
#define HINFC504_CON_BUS_WIDTH BIT(4)
#define HINFC504_CON_READY_BUSY_SEL BIT(8)
#define HINFC504_CON_ECCTYPE_SHIFT (9)
#define HINFC504_CON_ECCTYPE_MASK (0x07)
#define HINFC504_PWIDTH 0x04
#define SET_HINFC504_PWIDTH(_w_lcnt, _r_lcnt, _rw_hcnt) \
((_w_lcnt) | (((_r_lcnt) & 0x0F) << 4) | (((_rw_hcnt) & 0x0F) << 8))
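/* e.g. SET_HINFC504_PWIDTH(5, 7, 3) = 5 | (7 << 4) | (3 << 8) = 0x375 */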
#define HINFC504_CMD 0x0C
#define HINFC504_ADDRL 0x10
#define HINFC504_ADDRH 0x14
#define HINFC504_DATA_NUM 0x18
#define HINFC504_OP 0x1C
#define HINFC504_OP_READ_DATA_EN BIT(1)
#define HINFC504_OP_WAIT_READY_EN BIT(2)
#define HINFC504_OP_CMD2_EN BIT(3)
#define HINFC504_OP_WRITE_DATA_EN BIT(4)
#define HINFC504_OP_ADDR_EN BIT(5)
#define HINFC504_OP_CMD1_EN BIT(6)
#define HINFC504_OP_NF_CS_SHIFT (7)
#define HINFC504_OP_NF_CS_MASK (3)
#define HINFC504_OP_ADDR_CYCLE_SHIFT (9)
#define HINFC504_OP_ADDR_CYCLE_MASK (7)
#define HINFC504_STATUS 0x20
#define HINFC504_READY BIT(0)
#define HINFC504_INTEN 0x24
#define HINFC504_INTEN_DMA BIT(9)
#define HINFC504_INTEN_UE BIT(6)
#define HINFC504_INTEN_CE BIT(5)
#define HINFC504_INTS 0x28
#define HINFC504_INTS_DMA BIT(9)
#define HINFC504_INTS_UE BIT(6)
#define HINFC504_INTS_CE BIT(5)
#define HINFC504_INTCLR 0x2C
#define HINFC504_INTCLR_DMA BIT(9)
#define HINFC504_INTCLR_UE BIT(6)
#define HINFC504_INTCLR_CE BIT(5)
#define HINFC504_ECC_STATUS 0x5C
#define HINFC504_ECC_16_BIT_SHIFT 12
#define HINFC504_DMA_CTRL 0x60
#define HINFC504_DMA_CTRL_DMA_START BIT(0)
#define HINFC504_DMA_CTRL_WE BIT(1)
#define HINFC504_DMA_CTRL_DATA_AREA_EN BIT(2)
#define HINFC504_DMA_CTRL_OOB_AREA_EN BIT(3)
#define HINFC504_DMA_CTRL_BURST4_EN BIT(4)
#define HINFC504_DMA_CTRL_BURST8_EN BIT(5)
#define HINFC504_DMA_CTRL_BURST16_EN BIT(6)
#define HINFC504_DMA_CTRL_ADDR_NUM_SHIFT (7)
#define HINFC504_DMA_CTRL_ADDR_NUM_MASK (1)
#define HINFC504_DMA_CTRL_CS_SHIFT (8)
#define HINFC504_DMA_CTRL_CS_MASK (0x03)
#define HINFC504_DMA_ADDR_DATA 0x64
#define HINFC504_DMA_ADDR_OOB 0x68
#define HINFC504_DMA_LEN 0x6C
#define HINFC504_DMA_LEN_OOB_SHIFT (16)
#define HINFC504_DMA_LEN_OOB_MASK (0xFFF)
#define HINFC504_DMA_PARA 0x70
#define HINFC504_DMA_PARA_DATA_RW_EN BIT(0)
#define HINFC504_DMA_PARA_OOB_RW_EN BIT(1)
#define HINFC504_DMA_PARA_DATA_EDC_EN BIT(2)
#define HINFC504_DMA_PARA_OOB_EDC_EN BIT(3)
#define HINFC504_DMA_PARA_DATA_ECC_EN BIT(4)
#define HINFC504_DMA_PARA_OOB_ECC_EN BIT(5)
#define HINFC_VERSION 0x74
#define HINFC504_LOG_READ_ADDR 0x7C
#define HINFC504_LOG_READ_LEN 0x80
#define HINFC504_NANDINFO_LEN 0x10
struct hinfc_host {
struct nand_chip chip;
struct device *dev;
void __iomem *iobase;
void __iomem *mmio;
struct completion cmd_complete;
unsigned int offset;
unsigned int command;
int chipselect;
unsigned int addr_cycle;
u32 addr_value[2];
u32 cache_addr_value[2];
char *buffer;
dma_addr_t dma_buffer;
dma_addr_t dma_oob;
int version;
unsigned int irq_status; /* interrupt status */
};
static inline unsigned int hinfc_read(struct hinfc_host *host, unsigned int reg)
{
return readl(host->iobase + reg);
}
static inline void hinfc_write(struct hinfc_host *host, unsigned int value,
unsigned int reg)
{
writel(value, host->iobase + reg);
}
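/*
 * Poll HINFC504_STATUS for the READY bit until the controller finishes the
 * current command or HINFC504_NFC_TIMEOUT elapses. Erase operations may take
 * longer, so they are polled with short sleeps until ready.
 */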
static void wait_controller_finished(struct hinfc_host *host)
{
unsigned long timeout = jiffies + HINFC504_NFC_TIMEOUT;
int val;
while (time_before(jiffies, timeout)) {
val = hinfc_read(host, HINFC504_STATUS);
if (host->command == NAND_CMD_ERASE2) {
			/* poll until the nfc is ready */
while (!(val & HINFC504_READY)) {
usleep_range(500, 1000);
val = hinfc_read(host, HINFC504_STATUS);
}
return;
}
if (val & HINFC504_READY)
return;
}
	/* the wait for the command timed out */
dev_err(host->dev, "Wait NAND controller exec cmd timeout.\n");
}
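/*
 * Program the DMA data/OOB addresses and transfer parameters, start the DMA
 * engine and wait for the completion interrupt (HINFC504_INTS_DMA).
 * @todev: non-zero for a transfer to the device (write), zero for a read.
 */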
static void hisi_nfc_dma_transfer(struct hinfc_host *host, int todev)
{
struct nand_chip *chip = &host->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned long val;
int ret;
hinfc_write(host, host->dma_buffer, HINFC504_DMA_ADDR_DATA);
hinfc_write(host, host->dma_oob, HINFC504_DMA_ADDR_OOB);
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
hinfc_write(host, ((mtd->oobsize & HINFC504_DMA_LEN_OOB_MASK)
<< HINFC504_DMA_LEN_OOB_SHIFT), HINFC504_DMA_LEN);
hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
| HINFC504_DMA_PARA_OOB_RW_EN, HINFC504_DMA_PARA);
} else {
if (host->command == NAND_CMD_READOOB)
hinfc_write(host, HINFC504_DMA_PARA_OOB_RW_EN
| HINFC504_DMA_PARA_OOB_EDC_EN
| HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
else
hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
| HINFC504_DMA_PARA_OOB_RW_EN
| HINFC504_DMA_PARA_DATA_EDC_EN
| HINFC504_DMA_PARA_OOB_EDC_EN
| HINFC504_DMA_PARA_DATA_ECC_EN
| HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
}
val = (HINFC504_DMA_CTRL_DMA_START | HINFC504_DMA_CTRL_BURST4_EN
| HINFC504_DMA_CTRL_BURST8_EN | HINFC504_DMA_CTRL_BURST16_EN
| HINFC504_DMA_CTRL_DATA_AREA_EN | HINFC504_DMA_CTRL_OOB_AREA_EN
| ((host->addr_cycle == 4 ? 1 : 0)
<< HINFC504_DMA_CTRL_ADDR_NUM_SHIFT)
| ((host->chipselect & HINFC504_DMA_CTRL_CS_MASK)
<< HINFC504_DMA_CTRL_CS_SHIFT));
if (todev)
val |= HINFC504_DMA_CTRL_WE;
init_completion(&host->cmd_complete);
hinfc_write(host, val, HINFC504_DMA_CTRL);
ret = wait_for_completion_timeout(&host->cmd_complete,
HINFC504_NFC_DMA_TIMEOUT);
if (!ret) {
dev_err(host->dev, "DMA operation(irq) timeout!\n");
/* sanity check */
val = hinfc_read(host, HINFC504_DMA_CTRL);
if (!(val & HINFC504_DMA_CTRL_DMA_START))
dev_err(host->dev, "DMA is already done but without irq ACK!\n");
else
dev_err(host->dev, "DMA is really timeout!\n");
}
}
static int hisi_nfc_send_cmd_pageprog(struct hinfc_host *host)
{
host->addr_value[0] &= 0xffff0000;
hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
hinfc_write(host, NAND_CMD_PAGEPROG << 8 | NAND_CMD_SEQIN,
HINFC504_CMD);
hisi_nfc_dma_transfer(host, 1);
return 0;
}
static int hisi_nfc_send_cmd_readstart(struct hinfc_host *host)
{
struct mtd_info *mtd = nand_to_mtd(&host->chip);
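	/* Skip the transfer if this page is already in the controller buffer */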
if ((host->addr_value[0] == host->cache_addr_value[0]) &&
(host->addr_value[1] == host->cache_addr_value[1]))
return 0;
host->addr_value[0] &= 0xffff0000;
hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
hinfc_write(host, NAND_CMD_READSTART << 8 | NAND_CMD_READ0,
HINFC504_CMD);
hinfc_write(host, 0, HINFC504_LOG_READ_ADDR);
hinfc_write(host, mtd->writesize + mtd->oobsize,
HINFC504_LOG_READ_LEN);
hisi_nfc_dma_transfer(host, 0);
host->cache_addr_value[0] = host->addr_value[0];
host->cache_addr_value[1] = host->addr_value[1];
return 0;
}
static int hisi_nfc_send_cmd_erase(struct hinfc_host *host)
{
hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
hinfc_write(host, (NAND_CMD_ERASE2 << 8) | NAND_CMD_ERASE1,
HINFC504_CMD);
hinfc_write(host, HINFC504_OP_WAIT_READY_EN
| HINFC504_OP_CMD2_EN
| HINFC504_OP_CMD1_EN
| HINFC504_OP_ADDR_EN
| ((host->chipselect & HINFC504_OP_NF_CS_MASK)
<< HINFC504_OP_NF_CS_SHIFT)
| ((host->addr_cycle & HINFC504_OP_ADDR_CYCLE_MASK)
<< HINFC504_OP_ADDR_CYCLE_SHIFT),
HINFC504_OP);
wait_controller_finished(host);
return 0;
}
static int hisi_nfc_send_cmd_readid(struct hinfc_host *host)
{
hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
hinfc_write(host, NAND_CMD_READID, HINFC504_CMD);
hinfc_write(host, 0, HINFC504_ADDRL);
hinfc_write(host, HINFC504_OP_CMD1_EN | HINFC504_OP_ADDR_EN
| HINFC504_OP_READ_DATA_EN
| ((host->chipselect & HINFC504_OP_NF_CS_MASK)
<< HINFC504_OP_NF_CS_SHIFT)
| 1 << HINFC504_OP_ADDR_CYCLE_SHIFT, HINFC504_OP);
wait_controller_finished(host);
return 0;
}
static int hisi_nfc_send_cmd_status(struct hinfc_host *host)
{
hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
hinfc_write(host, NAND_CMD_STATUS, HINFC504_CMD);
hinfc_write(host, HINFC504_OP_CMD1_EN
| HINFC504_OP_READ_DATA_EN
| ((host->chipselect & HINFC504_OP_NF_CS_MASK)
<< HINFC504_OP_NF_CS_SHIFT),
HINFC504_OP);
wait_controller_finished(host);
return 0;
}
static int hisi_nfc_send_cmd_reset(struct hinfc_host *host, int chipselect)
{
hinfc_write(host, NAND_CMD_RESET, HINFC504_CMD);
hinfc_write(host, HINFC504_OP_CMD1_EN
| ((chipselect & HINFC504_OP_NF_CS_MASK)
<< HINFC504_OP_NF_CS_SHIFT)
| HINFC504_OP_WAIT_READY_EN,
HINFC504_OP);
wait_controller_finished(host);
return 0;
}
static void hisi_nfc_select_chip(struct nand_chip *chip, int chipselect)
{
struct hinfc_host *host = nand_get_controller_data(chip);
if (chipselect < 0)
return;
host->chipselect = chipselect;
}
static uint8_t hisi_nfc_read_byte(struct nand_chip *chip)
{
struct hinfc_host *host = nand_get_controller_data(chip);
if (host->command == NAND_CMD_STATUS)
return *(uint8_t *)(host->mmio);
host->offset++;
if (host->command == NAND_CMD_READID)
return *(uint8_t *)(host->mmio + host->offset - 1);
return *(uint8_t *)(host->buffer + host->offset - 1);
}
static void
hisi_nfc_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
struct hinfc_host *host = nand_get_controller_data(chip);
memcpy(host->buffer + host->offset, buf, len);
host->offset += len;
}
static void hisi_nfc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
struct hinfc_host *host = nand_get_controller_data(chip);
memcpy(buf, host->buffer + host->offset, len);
host->offset += len;
}
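/*
 * Build the controller address cycles: the column address (if any) goes into
 * the low 16 bits of addr_value[0], the page address is packed into the
 * following bytes, and chips needing a third row-address cycle place the
 * extra byte in addr_value[1] (or in addr_value[0] for erase, which has no
 * column cycles).
 */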
static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct hinfc_host *host = nand_get_controller_data(chip);
unsigned int command = host->command;
host->addr_cycle = 0;
host->addr_value[0] = 0;
host->addr_value[1] = 0;
/* Serially input address */
if (column != -1) {
/* Adjust columns for 16 bit buswidth */
if (chip->options & NAND_BUSWIDTH_16 &&
!nand_opcode_8bits(command))
column >>= 1;
host->addr_value[0] = column & 0xffff;
host->addr_cycle = 2;
}
if (page_addr != -1) {
host->addr_value[0] |= (page_addr & 0xffff)
<< (host->addr_cycle * 8);
host->addr_cycle += 2;
if (chip->options & NAND_ROW_ADDR_3) {
host->addr_cycle += 1;
if (host->command == NAND_CMD_ERASE1)
host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
else
host->addr_value[1] |= ((page_addr >> 16) & 0xff);
}
}
}
static void hisi_nfc_cmdfunc(struct nand_chip *chip, unsigned command,
int column, int page_addr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct hinfc_host *host = nand_get_controller_data(chip);
int is_cache_invalid = 1;
unsigned int flag = 0;
host->command = command;
switch (command) {
case NAND_CMD_READ0:
case NAND_CMD_READOOB:
if (command == NAND_CMD_READ0)
host->offset = column;
else
host->offset = column + mtd->writesize;
is_cache_invalid = 0;
set_addr(mtd, column, page_addr);
hisi_nfc_send_cmd_readstart(host);
break;
case NAND_CMD_SEQIN:
host->offset = column;
set_addr(mtd, column, page_addr);
break;
case NAND_CMD_ERASE1:
set_addr(mtd, column, page_addr);
break;
case NAND_CMD_PAGEPROG:
hisi_nfc_send_cmd_pageprog(host);
break;
case NAND_CMD_ERASE2:
hisi_nfc_send_cmd_erase(host);
break;
case NAND_CMD_READID:
host->offset = column;
memset(host->mmio, 0, 0x10);
hisi_nfc_send_cmd_readid(host);
break;
case NAND_CMD_STATUS:
flag = hinfc_read(host, HINFC504_CON);
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
hinfc_write(host,
flag & ~(HINFC504_CON_ECCTYPE_MASK <<
HINFC504_CON_ECCTYPE_SHIFT), HINFC504_CON);
host->offset = 0;
memset(host->mmio, 0, 0x10);
hisi_nfc_send_cmd_status(host);
hinfc_write(host, flag, HINFC504_CON);
break;
case NAND_CMD_RESET:
hisi_nfc_send_cmd_reset(host, host->chipselect);
break;
default:
dev_err(host->dev, "Error: unsupported cmd(cmd=%x, col=%x, page=%x)\n",
command, column, page_addr);
}
if (is_cache_invalid) {
host->cache_addr_value[0] = ~0;
host->cache_addr_value[1] = ~0;
}
}
static irqreturn_t hinfc_irq_handle(int irq, void *devid)
{
struct hinfc_host *host = devid;
unsigned int flag;
flag = hinfc_read(host, HINFC504_INTS);
	/* store interrupt state */
host->irq_status |= flag;
if (flag & HINFC504_INTS_DMA) {
hinfc_write(host, HINFC504_INTCLR_DMA, HINFC504_INTCLR);
complete(&host->cmd_complete);
} else if (flag & HINFC504_INTS_CE) {
hinfc_write(host, HINFC504_INTCLR_CE, HINFC504_INTCLR);
} else if (flag & HINFC504_INTS_UE) {
hinfc_write(host, HINFC504_INTCLR_UE, HINFC504_INTCLR);
}
return IRQ_HANDLED;
}
static int hisi_nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct hinfc_host *host = nand_get_controller_data(chip);
int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc;
int stat_1, stat_2;
nand_read_page_op(chip, page, 0, buf, mtd->writesize);
chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
	/* errors which cannot be corrected by ECC */
if (host->irq_status & HINFC504_INTS_UE) {
mtd->ecc_stats.failed++;
} else if (host->irq_status & HINFC504_INTS_CE) {
		/* TODO: add support for other ECC modes */
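		/*
		 * For 16-bit ECC the status register packs two 6-bit bitflip
		 * counts starting at bit HINFC504_ECC_16_BIT_SHIFT.
		 */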
switch (chip->ecc.strength) {
case 16:
status_ecc = hinfc_read(host, HINFC504_ECC_STATUS) >>
HINFC504_ECC_16_BIT_SHIFT & 0x0fff;
stat_2 = status_ecc & 0x3f;
stat_1 = status_ecc >> 6 & 0x3f;
stat = stat_1 + stat_2;
stat_max = max_t(int, stat_1, stat_2);
}
mtd->ecc_stats.corrected += stat;
max_bitflips = max_t(int, max_bitflips, stat_max);
}
host->irq_status = 0;
return max_bitflips;
}
static int hisi_nand_read_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct hinfc_host *host = nand_get_controller_data(chip);
nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
if (host->irq_status & HINFC504_INTS_UE) {
host->irq_status = 0;
return -EBADMSG;
}
host->irq_status = 0;
return 0;
}
static int hisi_nand_write_page_hwecc(struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
return nand_prog_page_end_op(chip);
}
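/*
 * One-time controller setup: latch the controller version, reset the cached
 * address state, program a default configuration (2K page, no ECC), fill the
 * page buffer with 0xff, set the read/write pulse widths and enable the DMA
 * completion interrupt.
 */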
static void hisi_nfc_host_init(struct hinfc_host *host)
{
struct nand_chip *chip = &host->chip;
unsigned int flag = 0;
host->version = hinfc_read(host, HINFC_VERSION);
host->addr_cycle = 0;
host->addr_value[0] = 0;
host->addr_value[1] = 0;
host->cache_addr_value[0] = ~0;
host->cache_addr_value[1] = ~0;
host->chipselect = 0;
	/* default page size: 2K, no ECC; reconfigured when the chip is attached */
flag = HINFC504_CON_OP_MODE_NORMAL | HINFC504_CON_READY_BUSY_SEL
| ((0x001 & HINFC504_CON_PAGESIZE_MASK)
<< HINFC504_CON_PAGEISZE_SHIFT)
| ((0x0 & HINFC504_CON_ECCTYPE_MASK)
<< HINFC504_CON_ECCTYPE_SHIFT)
| ((chip->options & NAND_BUSWIDTH_16) ?
HINFC504_CON_BUS_WIDTH : 0);
hinfc_write(host, flag, HINFC504_CON);
memset(host->mmio, 0xff, HINFC504_BUFFER_BASE_ADDRESS_LEN);
hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
/* enable DMA irq */
hinfc_write(host, HINFC504_INTEN_DMA, HINFC504_INTEN);
}
static int hisi_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
/* FIXME: add ECC bytes position */
return -ENOTSUPP;
}
static int hisi_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section)
return -ERANGE;
oobregion->offset = 2;
oobregion->length = 6;
return 0;
}
static const struct mtd_ooblayout_ops hisi_ooblayout_ops = {
.ecc = hisi_ooblayout_ecc,
.free = hisi_ooblayout_free,
};
static int hisi_nfc_ecc_probe(struct hinfc_host *host)
{
unsigned int flag;
int size, strength, ecc_bits;
struct device *dev = host->dev;
struct nand_chip *chip = &host->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
size = chip->ecc.size;
strength = chip->ecc.strength;
if (size != 1024) {
dev_err(dev, "error ecc size: %d\n", size);
return -EINVAL;
}
if ((size == 1024) && ((strength != 8) && (strength != 16) &&
(strength != 24) && (strength != 40))) {
dev_err(dev, "ecc size and strength do not match\n");
return -EINVAL;
}
chip->ecc.size = size;
chip->ecc.strength = strength;
chip->ecc.read_page = hisi_nand_read_page_hwecc;
chip->ecc.read_oob = hisi_nand_read_oob;
chip->ecc.write_page = hisi_nand_write_page_hwecc;
switch (chip->ecc.strength) {
case 16:
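		/* value written to the HINFC504_CON ECCTYPE field for 16-bit/1KB ECC */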
ecc_bits = 6;
if (mtd->writesize == 2048)
mtd_set_ooblayout(mtd, &hisi_ooblayout_ops);
/* TODO: add more page size support */
break;
/* TODO: add more ecc strength support */
default:
dev_err(dev, "not support strength: %d\n", chip->ecc.strength);
return -EINVAL;
}
flag = hinfc_read(host, HINFC504_CON);
/* add ecc type configure */
flag |= ((ecc_bits & HINFC504_CON_ECCTYPE_MASK)
<< HINFC504_CON_ECCTYPE_SHIFT);
hinfc_write(host, flag, HINFC504_CON);
/* enable ecc irq */
flag = hinfc_read(host, HINFC504_INTEN) & 0xfff;
hinfc_write(host, flag | HINFC504_INTEN_UE | HINFC504_INTEN_CE,
HINFC504_INTEN);
return 0;
}
static int hisi_nfc_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct hinfc_host *host = nand_get_controller_data(chip);
int flag;
host->buffer = dmam_alloc_coherent(host->dev,
mtd->writesize + mtd->oobsize,
&host->dma_buffer, GFP_KERNEL);
if (!host->buffer)
return -ENOMEM;
host->dma_oob = host->dma_buffer + mtd->writesize;
memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize);
flag = hinfc_read(host, HINFC504_CON);
flag &= ~(HINFC504_CON_PAGESIZE_MASK << HINFC504_CON_PAGEISZE_SHIFT);
switch (mtd->writesize) {
case 2048:
flag |= (0x001 << HINFC504_CON_PAGEISZE_SHIFT);
break;
/*
* TODO: add more pagesize support,
* default pagesize has been set in hisi_nfc_host_init
*/
default:
dev_err(host->dev, "NON-2KB page size nand flash\n");
return -EINVAL;
}
hinfc_write(host, flag, HINFC504_CON);
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
hisi_nfc_ecc_probe(host);
return 0;
}
static const struct nand_controller_ops hisi_nfc_controller_ops = {
.attach_chip = hisi_nfc_attach_chip,
};
static int hisi_nfc_probe(struct platform_device *pdev)
{
int ret = 0, irq, max_chips = HINFC504_MAX_CHIP;
struct device *dev = &pdev->dev;
struct hinfc_host *host;
struct nand_chip *chip;
struct mtd_info *mtd;
struct device_node *np = dev->of_node;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
host->dev = dev;
platform_set_drvdata(pdev, host);
chip = &host->chip;
mtd = nand_to_mtd(chip);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -ENXIO;
host->iobase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->iobase))
return PTR_ERR(host->iobase);
host->mmio = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(host->mmio))
return PTR_ERR(host->mmio);
mtd->name = "hisi_nand";
mtd->dev.parent = &pdev->dev;
nand_set_controller_data(chip, host);
nand_set_flash_node(chip, np);
chip->legacy.cmdfunc = hisi_nfc_cmdfunc;
chip->legacy.select_chip = hisi_nfc_select_chip;
chip->legacy.read_byte = hisi_nfc_read_byte;
chip->legacy.write_buf = hisi_nfc_write_buf;
chip->legacy.read_buf = hisi_nfc_read_buf;
chip->legacy.chip_delay = HINFC504_CHIP_DELAY;
chip->legacy.set_features = nand_get_set_features_notsupp;
chip->legacy.get_features = nand_get_set_features_notsupp;
hisi_nfc_host_init(host);
ret = devm_request_irq(dev, irq, hinfc_irq_handle, 0x0, "nandc", host);
if (ret) {
dev_err(dev, "failed to request IRQ\n");
return ret;
}
chip->legacy.dummy_controller.ops = &hisi_nfc_controller_ops;
ret = nand_scan(chip, max_chips);
if (ret)
return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "Err MTD partition=%d\n", ret);
nand_cleanup(chip);
return ret;
}
return 0;
}
static void hisi_nfc_remove(struct platform_device *pdev)
{
struct hinfc_host *host = platform_get_drvdata(pdev);
struct nand_chip *chip = &host->chip;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
}
#ifdef CONFIG_PM_SLEEP
static int hisi_nfc_suspend(struct device *dev)
{
struct hinfc_host *host = dev_get_drvdata(dev);
unsigned long timeout = jiffies + HINFC504_NFC_PM_TIMEOUT;
while (time_before(jiffies, timeout)) {
if (((hinfc_read(host, HINFC504_STATUS) & 0x1) == 0x0) &&
(hinfc_read(host, HINFC504_DMA_CTRL) &
HINFC504_DMA_CTRL_DMA_START)) {
cond_resched();
return 0;
}
}
dev_err(host->dev, "nand controller suspend timeout.\n");
return -EAGAIN;
}
static int hisi_nfc_resume(struct device *dev)
{
int cs;
struct hinfc_host *host = dev_get_drvdata(dev);
struct nand_chip *chip = &host->chip;
for (cs = 0; cs < nanddev_ntargets(&chip->base); cs++)
hisi_nfc_send_cmd_reset(host, cs);
hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(hisi_nfc_pm_ops, hisi_nfc_suspend, hisi_nfc_resume);
static const struct of_device_id nfc_id_table[] = {
{ .compatible = "hisilicon,504-nfc" },
{}
};
MODULE_DEVICE_TABLE(of, nfc_id_table);
static struct platform_driver hisi_nfc_driver = {
.driver = {
.name = "hisi_nand",
.of_match_table = nfc_id_table,
.pm = &hisi_nfc_pm_ops,
},
.probe = hisi_nfc_probe,
.remove_new = hisi_nfc_remove,
};
module_platform_driver(hisi_nfc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Zhou Wang");
MODULE_AUTHOR("Zhiyong Cai");
MODULE_DESCRIPTION("Hisilicon Nand Flash Controller Driver");
| linux-master | drivers/mtd/nand/raw/hisi504_nand.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NXP LPC32XX NAND SLC driver
*
* Authors:
* Kevin Wells <[email protected]>
* Roland Stigge <[email protected]>
*
* Copyright © 2011 NXP Semiconductors
* Copyright © 2012 Roland Stigge
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/mtd/lpc32xx_slc.h>
#define LPC32XX_MODNAME "lpc32xx-nand"
/**********************************************************************
* SLC NAND controller register offsets
**********************************************************************/
#define SLC_DATA(x) (x + 0x000)
#define SLC_ADDR(x) (x + 0x004)
#define SLC_CMD(x) (x + 0x008)
#define SLC_STOP(x) (x + 0x00C)
#define SLC_CTRL(x) (x + 0x010)
#define SLC_CFG(x) (x + 0x014)
#define SLC_STAT(x) (x + 0x018)
#define SLC_INT_STAT(x) (x + 0x01C)
#define SLC_IEN(x) (x + 0x020)
#define SLC_ISR(x) (x + 0x024)
#define SLC_ICR(x) (x + 0x028)
#define SLC_TAC(x) (x + 0x02C)
#define SLC_TC(x) (x + 0x030)
#define SLC_ECC(x) (x + 0x034)
#define SLC_DMA_DATA(x) (x + 0x038)
/**********************************************************************
* slc_ctrl register definitions
**********************************************************************/
#define SLCCTRL_SW_RESET (1 << 2) /* Reset the NAND controller bit */
#define SLCCTRL_ECC_CLEAR (1 << 1) /* Reset ECC bit */
#define SLCCTRL_DMA_START (1 << 0) /* Start DMA channel bit */
/**********************************************************************
* slc_cfg register definitions
**********************************************************************/
#define SLCCFG_CE_LOW (1 << 5) /* Force CE low bit */
#define SLCCFG_DMA_ECC (1 << 4) /* Enable DMA ECC bit */
#define SLCCFG_ECC_EN (1 << 3) /* ECC enable bit */
#define SLCCFG_DMA_BURST (1 << 2) /* DMA burst bit */
#define SLCCFG_DMA_DIR (1 << 1) /* DMA write(0)/read(1) bit */
#define SLCCFG_WIDTH (1 << 0) /* External device width, 0=8bit */
/**********************************************************************
* slc_stat register definitions
**********************************************************************/
#define SLCSTAT_DMA_FIFO (1 << 2) /* DMA FIFO has data bit */
#define SLCSTAT_SLC_FIFO (1 << 1) /* SLC FIFO has data bit */
#define SLCSTAT_NAND_READY (1 << 0) /* NAND device is ready bit */
/**********************************************************************
* slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
**********************************************************************/
#define SLCSTAT_INT_TC (1 << 1) /* Transfer count bit */
#define SLCSTAT_INT_RDY_EN (1 << 0) /* Ready interrupt bit */
/**********************************************************************
* slc_tac register definitions
**********************************************************************/
/* Computation of clock cycles on basis of controller and device clock rates */
#define SLCTAC_CLOCKS(c, n, s) (min_t(u32, DIV_ROUND_UP(c, n) - 1, 0xF) << s)
/* Clock setting for RDY write sample wait time in 2*n clocks */
#define SLCTAC_WDR(n) (((n) & 0xF) << 28)
/* Write pulse width in clock cycles, 1 to 16 clocks */
#define SLCTAC_WWIDTH(c, n) (SLCTAC_CLOCKS(c, n, 24))
/* Write hold time of control and data signals, 1 to 16 clocks */
#define SLCTAC_WHOLD(c, n) (SLCTAC_CLOCKS(c, n, 20))
/* Write setup time of control and data signals, 1 to 16 clocks */
#define SLCTAC_WSETUP(c, n) (SLCTAC_CLOCKS(c, n, 16))
/* Clock setting for RDY read sample wait time in 2*n clocks */
#define SLCTAC_RDR(n) (((n) & 0xF) << 12)
/* Read pulse width in clock cycles, 1 to 16 clocks */
#define SLCTAC_RWIDTH(c, n) (SLCTAC_CLOCKS(c, n, 8))
/* Read hold time of control and data signals, 1 to 16 clocks */
#define SLCTAC_RHOLD(c, n) (SLCTAC_CLOCKS(c, n, 4))
/* Read setup time of control and data signals, 1 to 16 clocks */
#define SLCTAC_RSETUP(c, n) (SLCTAC_CLOCKS(c, n, 0))
/**********************************************************************
* slc_ecc register definitions
**********************************************************************/
/* ECC line parity fetch macro */
#define SLCECC_TO_LINEPAR(n) (((n) >> 6) & 0x7FFF)
#define SLCECC_TO_COLPAR(n) ((n) & 0x3F)
/*
* DMA requires storage space for the DMA local buffer and the hardware ECC
* storage area. The DMA local buffer is only used if DMA mapping fails
* during runtime.
*/
#define LPC32XX_DMA_DATA_SIZE 4096
#define LPC32XX_ECC_SAVE_SIZE ((4096 / 256) * 4)
/* Number of bytes used for ECC stored in NAND per 256 bytes */
#define LPC32XX_SLC_DEV_ECC_BYTES 3
/*
* If the NAND base clock frequency can't be fetched, this frequency will be
* used instead as the base. This rate is used to setup the timing registers
* used for NAND accesses.
*/
#define LPC32XX_DEF_BUS_RATE 133250000
/* Milliseconds for DMA FIFO timeout (unlikely anyway) */
#define LPC32XX_DMA_TIMEOUT 100
/*
* NAND ECC Layout for small page NAND devices
* Note: For large and huge page devices, the default layouts are used
*/
static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section)
return -ERANGE;
oobregion->length = 6;
oobregion->offset = 10;
return 0;
}
static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section > 1)
return -ERANGE;
if (!section) {
oobregion->offset = 0;
oobregion->length = 4;
} else {
oobregion->offset = 6;
oobregion->length = 4;
}
return 0;
}
static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
.ecc = lpc32xx_ooblayout_ecc,
.free = lpc32xx_ooblayout_free,
};
static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
/*
* Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
 * Note: Large page devices use the default layout
*/
static struct nand_bbt_descr bbt_smallpage_main_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
.offs = 0,
.len = 4,
.veroffs = 6,
.maxblocks = 4,
.pattern = bbt_pattern
};
static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
.offs = 0,
.len = 4,
.veroffs = 6,
.maxblocks = 4,
.pattern = mirror_pattern
};
/*
* NAND platform configuration structure
*/
struct lpc32xx_nand_cfg_slc {
uint32_t wdr_clks;
uint32_t wwidth;
uint32_t whold;
uint32_t wsetup;
uint32_t rdr_clks;
uint32_t rwidth;
uint32_t rhold;
uint32_t rsetup;
struct mtd_partition *parts;
unsigned num_parts;
};
struct lpc32xx_nand_host {
struct nand_chip nand_chip;
struct lpc32xx_slc_platform_data *pdata;
struct clk *clk;
struct gpio_desc *wp_gpio;
void __iomem *io_base;
struct lpc32xx_nand_cfg_slc *ncfg;
struct completion comp;
struct dma_chan *dma_chan;
uint32_t dma_buf_len;
struct dma_slave_config dma_slave_config;
struct scatterlist sgl;
/*
* DMA and CPU addresses of ECC work area and data buffer
*/
uint32_t *ecc_buf;
uint8_t *data_buf;
dma_addr_t io_base_dma;
};
static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
uint32_t clkrate, tmp;
/* Reset SLC controller */
writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
udelay(1000);
/* Basic setup */
writel(0, SLC_CFG(host->io_base));
writel(0, SLC_IEN(host->io_base));
writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
SLC_ICR(host->io_base));
/* Get base clock for SLC block */
clkrate = clk_get_rate(host->clk);
if (clkrate == 0)
clkrate = LPC32XX_DEF_BUS_RATE;
/* Compute clock setup values */
tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
SLCTAC_WWIDTH(clkrate, host->ncfg->wwidth) |
SLCTAC_WHOLD(clkrate, host->ncfg->whold) |
SLCTAC_WSETUP(clkrate, host->ncfg->wsetup) |
SLCTAC_RDR(host->ncfg->rdr_clks) |
SLCTAC_RWIDTH(clkrate, host->ncfg->rwidth) |
SLCTAC_RHOLD(clkrate, host->ncfg->rhold) |
SLCTAC_RSETUP(clkrate, host->ncfg->rsetup);
writel(tmp, SLC_TAC(host->io_base));
}
/*
* Hardware specific access to control lines
*/
static void lpc32xx_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
unsigned int ctrl)
{
uint32_t tmp;
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
/* Does CE state need to be changed? */
tmp = readl(SLC_CFG(host->io_base));
if (ctrl & NAND_NCE)
tmp |= SLCCFG_CE_LOW;
else
tmp &= ~SLCCFG_CE_LOW;
writel(tmp, SLC_CFG(host->io_base));
if (cmd != NAND_CMD_NONE) {
if (ctrl & NAND_CLE)
writel(cmd, SLC_CMD(host->io_base));
else
writel(cmd, SLC_ADDR(host->io_base));
}
}
/*
* Read the Device Ready pin
*/
static int lpc32xx_nand_device_ready(struct nand_chip *chip)
{
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
int rdy = 0;
if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
rdy = 1;
return rdy;
}
/*
* Enable NAND write protect
*/
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
if (host->wp_gpio)
gpiod_set_value_cansleep(host->wp_gpio, 1);
}
/*
* Disable NAND write protect
*/
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
if (host->wp_gpio)
gpiod_set_value_cansleep(host->wp_gpio, 0);
}
/*
* Prepares SLC for transfers with H/W ECC enabled
*/
static void lpc32xx_nand_ecc_enable(struct nand_chip *chip, int mode)
{
	/* Hardware ECC is enabled automatically by the controller as needed */
}
/*
* Calculates the ECC for the data
*/
static int lpc32xx_nand_ecc_calculate(struct nand_chip *chip,
const unsigned char *buf,
unsigned char *code)
{
/*
* ECC is calculated automatically in hardware during syndrome read
* and write operations, so it doesn't need to be calculated here.
*/
return 0;
}
/*
* Read a single byte from NAND device
*/
static uint8_t lpc32xx_nand_read_byte(struct nand_chip *chip)
{
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
return (uint8_t)readl(SLC_DATA(host->io_base));
}
/*
* Simple device read without ECC
*/
static void lpc32xx_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
{
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
/* Direct device read with no ECC */
while (len-- > 0)
*buf++ = (uint8_t)readl(SLC_DATA(host->io_base));
}
/*
* Simple device write without ECC
*/
static void lpc32xx_nand_write_buf(struct nand_chip *chip, const uint8_t *buf,
int len)
{
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
/* Direct device write with no ECC */
while (len-- > 0)
writel((uint32_t)*buf++, SLC_DATA(host->io_base));
}
/*
* Read the OOB data from the device without ECC using FIFO method
*/
static int lpc32xx_nand_read_oob_syndrome(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/*
* Write the OOB data to the device without ECC using FIFO method
*/
static int lpc32xx_nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
mtd->oobsize);
}
/*
* Fills in the ECC fields in the OOB buffer with the hardware generated ECC
*/
static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
{
int i;
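	/*
	 * Each 32-bit hardware ECC word is shifted left by two, inverted,
	 * masked to 24 bits and stored as three bytes, most significant
	 * byte first.
	 */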
for (i = 0; i < (count * 3); i += 3) {
uint32_t ce = ecc[i / 3];
ce = ~(ce << 2) & 0xFFFFFF;
spare[i + 2] = (uint8_t)(ce & 0xFF);
ce >>= 8;
spare[i + 1] = (uint8_t)(ce & 0xFF);
ce >>= 8;
spare[i] = (uint8_t)(ce & 0xFF);
}
}
static void lpc32xx_dma_complete_func(void *completion)
{
complete(completion);
}
static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
void *mem, int len, enum dma_transfer_direction dir)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
struct dma_async_tx_descriptor *desc;
int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
int res;
host->dma_slave_config.direction = dir;
host->dma_slave_config.src_addr = dma;
host->dma_slave_config.dst_addr = dma;
host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
host->dma_slave_config.src_maxburst = 4;
host->dma_slave_config.dst_maxburst = 4;
/* DMA controller does flow control: */
host->dma_slave_config.device_fc = false;
if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
return -ENXIO;
}
sg_init_one(&host->sgl, mem, len);
res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
DMA_BIDIRECTIONAL);
if (res != 1) {
dev_err(mtd->dev.parent, "Failed to map sg list\n");
return -ENXIO;
}
desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
flags);
if (!desc) {
dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
goto out1;
}
init_completion(&host->comp);
desc->callback = lpc32xx_dma_complete_func;
desc->callback_param = &host->comp;
dmaengine_submit(desc);
dma_async_issue_pending(host->dma_chan);
wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));
dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
DMA_BIDIRECTIONAL);
return 0;
out1:
dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
DMA_BIDIRECTIONAL);
return -ENXIO;
}
/*
* DMA read/write transfers with ECC support
*/
static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
int read)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
int i, status = 0;
unsigned long timeout;
int res;
enum dma_transfer_direction dir =
read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
uint8_t *dma_buf;
bool dma_mapped;
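	/*
	 * DMA directly from/to the caller's buffer when it lies in lowmem;
	 * otherwise bounce through the driver's local buffer.
	 */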
if ((void *)buf <= high_memory) {
dma_buf = buf;
dma_mapped = true;
} else {
dma_buf = host->data_buf;
dma_mapped = false;
if (!read)
memcpy(host->data_buf, buf, mtd->writesize);
}
if (read) {
writel(readl(SLC_CFG(host->io_base)) |
SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
} else {
writel((readl(SLC_CFG(host->io_base)) |
SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
~SLCCFG_DMA_DIR,
SLC_CFG(host->io_base));
}
/* Clear initial ECC */
writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));
/* Transfer size is data area only */
writel(mtd->writesize, SLC_TC(host->io_base));
/* Start transfer in the NAND controller */
writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
SLC_CTRL(host->io_base));
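	/*
	 * DMA one subpage per ECC step. The intermediate ECC word is read
	 * after every step except the last; the final value is read from
	 * SLC_ECC once the transfer has drained.
	 */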
for (i = 0; i < chip->ecc.steps; i++) {
/* Data */
res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
dma_buf + i * chip->ecc.size,
mtd->writesize / chip->ecc.steps, dir);
if (res)
return res;
/* Always _read_ ECC */
if (i == chip->ecc.steps - 1)
break;
if (!read) /* ECC availability delayed on write */
udelay(10);
res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
&host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
if (res)
return res;
}
/*
* According to NXP, the DMA can be finished here, but the NAND
	 * controller may still have buffered data. Since porting to the
	 * dmaengine DMA driver (amba-pl080), the condition (DMA_FIFO empty)
	 * appears to always be true in testing. Keeping the check for
* safety reasons for now.
*/
if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
dev_warn(mtd->dev.parent, "FIFO not empty!\n");
timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
time_before(jiffies, timeout))
cpu_relax();
if (!time_before(jiffies, timeout)) {
dev_err(mtd->dev.parent, "FIFO held data too long\n");
status = -EIO;
}
}
/* Read last calculated ECC value */
if (!read)
udelay(10);
host->ecc_buf[chip->ecc.steps - 1] =
readl(SLC_ECC(host->io_base));
/* Flush DMA */
dmaengine_terminate_all(host->dma_chan);
if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
readl(SLC_TC(host->io_base))) {
/* Something is left in the FIFO, something is wrong */
dev_err(mtd->dev.parent, "DMA FIFO failure\n");
status = -EIO;
}
/* Stop DMA & HW ECC */
writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
SLC_CTRL(host->io_base));
writel(readl(SLC_CFG(host->io_base)) &
~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
SLCCFG_DMA_BURST), SLC_CFG(host->io_base));
if (!dma_mapped && read)
memcpy(buf, host->data_buf, mtd->writesize);
return status;
}
/*
* Read the data and OOB data from the device, use ECC correction with the
* data, disable ECC for the OOB data
*/
static int lpc32xx_nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
struct mtd_oob_region oobregion = { };
int stat, i, status, error;
uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
/* Issue read command */
nand_read_page_op(chip, page, 0, NULL, 0);
/* Read data and oob, calculate ECC */
status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
/* Get OOB data */
chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
/* Convert to stored ECC format */
lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);
/* Pointer to ECC data retrieved from NAND spare area */
error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
if (error)
return error;
oobecc = chip->oob_poi + oobregion.offset;
for (i = 0; i < chip->ecc.steps; i++) {
stat = chip->ecc.correct(chip, buf, oobecc,
&tmpecc[i * chip->ecc.bytes]);
if (stat < 0)
mtd->ecc_stats.failed++;
else
mtd->ecc_stats.corrected += stat;
buf += chip->ecc.size;
oobecc += chip->ecc.bytes;
}
return status;
}
/*
* Read the data and OOB data from the device, no ECC correction with the
* data or OOB data
*/
static int lpc32xx_nand_read_page_raw_syndrome(struct nand_chip *chip,
uint8_t *buf, int oob_required,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
/* Issue read command */
nand_read_page_op(chip, page, 0, NULL, 0);
/* Raw reads can just use the FIFO interface */
chip->legacy.read_buf(chip, buf, chip->ecc.size * chip->ecc.steps);
chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
return 0;
}
/*
* Write the data and OOB data to the device, use ECC with the data,
* disable ECC for the OOB data
*/
static int lpc32xx_nand_write_page_syndrome(struct nand_chip *chip,
const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
struct mtd_oob_region oobregion = { };
uint8_t *pb;
int error;
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
/* Write data, calculate ECC on outbound data */
error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
if (error)
return error;
/*
* The calculated ECC needs some manual work done to it before
* committing it to NAND. Process the calculated ECC and place
* the resultant values directly into the OOB buffer. */
error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
if (error)
return error;
pb = chip->oob_poi + oobregion.offset;
lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);
/* Write ECC data to device */
chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
return nand_prog_page_end_op(chip);
}
/*
* Write the data and OOB data to the device, no ECC correction with the
* data or OOB data
*/
static int lpc32xx_nand_write_page_raw_syndrome(struct nand_chip *chip,
const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
/* Raw writes can just use the FIFO interface */
nand_prog_page_begin_op(chip, page, 0, buf,
chip->ecc.size * chip->ecc.steps);
chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
return nand_prog_page_end_op(chip);
}
static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
{
struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
dma_cap_mask_t mask;
if (!host->pdata || !host->pdata->dma_filter) {
dev_err(mtd->dev.parent, "no DMA platform data\n");
return -ENOENT;
}
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
"nand-slc");
if (!host->dma_chan) {
dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
return -EBUSY;
}
return 0;
}
static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
{
struct lpc32xx_nand_cfg_slc *ncfg;
struct device_node *np = dev->of_node;
ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
if (!ncfg)
return NULL;
of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
of_property_read_u32(np, "nxp,whold", &ncfg->whold);
of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup);
of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks);
of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth);
of_property_read_u32(np, "nxp,rhold", &ncfg->rhold);
of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup);
if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold ||
!ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth ||
!ncfg->rhold || !ncfg->rsetup) {
dev_err(dev, "chip parameters not specified correctly\n");
return NULL;
}
return ncfg;
}
static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return 0;
/* OOB and ECC CPU and DMA work areas */
host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
/*
* Small page FLASH has a unique OOB layout, but large and huge
* page FLASH use the standard layout. Small page FLASH uses a
* custom BBT marker layout.
*/
if (mtd->writesize <= 512)
mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
/* These sizes remain the same regardless of page size */
chip->ecc.size = 256;
chip->ecc.strength = 1;
chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
chip->ecc.prepad = 0;
chip->ecc.postpad = 0;
chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
chip->ecc.correct = rawnand_sw_hamming_correct;
chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
/*
* Use a custom BBT marker setup for small page FLASH that
* won't interfere with the ECC layout. Large and huge page
* FLASH use the standard layout.
*/
if ((chip->bbt_options & NAND_BBT_USE_FLASH) &&
mtd->writesize <= 512) {
chip->bbt_td = &bbt_smallpage_main_descr;
chip->bbt_md = &bbt_smallpage_mirror_descr;
}
return 0;
}
static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
.attach_chip = lpc32xx_nand_attach_chip,
};
/*
* Probe for NAND controller
*/
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
struct lpc32xx_nand_host *host;
struct mtd_info *mtd;
struct nand_chip *chip;
struct resource *rc;
int res;
/* Allocate memory for the device structure (and zero it) */
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
host->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &rc);
if (IS_ERR(host->io_base))
return PTR_ERR(host->io_base);
host->io_base_dma = rc->start;
if (pdev->dev.of_node)
host->ncfg = lpc32xx_parse_dt(&pdev->dev);
if (!host->ncfg) {
dev_err(&pdev->dev,
"Missing or bad NAND config from device tree\n");
return -ENOENT;
}
/* Start with WP disabled, if available */
host->wp_gpio = gpiod_get_optional(&pdev->dev, NULL, GPIOD_OUT_LOW);
res = PTR_ERR_OR_ZERO(host->wp_gpio);
if (res) {
if (res != -EPROBE_DEFER)
dev_err(&pdev->dev, "WP GPIO is not available: %d\n",
res);
return res;
}
gpiod_set_consumer_name(host->wp_gpio, "NAND WP");
host->pdata = dev_get_platdata(&pdev->dev);
chip = &host->nand_chip;
mtd = nand_to_mtd(chip);
nand_set_controller_data(chip, host);
nand_set_flash_node(chip, pdev->dev.of_node);
mtd->owner = THIS_MODULE;
mtd->dev.parent = &pdev->dev;
/* Get NAND clock */
host->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "Clock failure\n");
res = -ENOENT;
goto enable_wp;
}
/* Set NAND IO addresses and command/ready functions */
chip->legacy.IO_ADDR_R = SLC_DATA(host->io_base);
chip->legacy.IO_ADDR_W = SLC_DATA(host->io_base);
chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl;
chip->legacy.dev_ready = lpc32xx_nand_device_ready;
chip->legacy.chip_delay = 20; /* 20us command delay time */
/* Init NAND controller */
lpc32xx_nand_setup(host);
platform_set_drvdata(pdev, host);
/* NAND callbacks for LPC32xx SLC hardware */
chip->legacy.read_byte = lpc32xx_nand_read_byte;
chip->legacy.read_buf = lpc32xx_nand_read_buf;
chip->legacy.write_buf = lpc32xx_nand_write_buf;
/*
* Allocate a large enough buffer for a single huge page plus
* extra space for the spare area and ECC storage area
*/
host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
GFP_KERNEL);
if (host->data_buf == NULL) {
res = -ENOMEM;
goto enable_wp;
}
res = lpc32xx_nand_dma_setup(host);
if (res) {
res = -EIO;
goto enable_wp;
}
/* Find NAND device */
chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
res = nand_scan(chip, 1);
if (res)
goto release_dma;
mtd->name = "nxp_lpc3220_slc";
res = mtd_device_register(mtd, host->ncfg->parts,
host->ncfg->num_parts);
if (res)
goto cleanup_nand;
return 0;
cleanup_nand:
nand_cleanup(chip);
release_dma:
dma_release_channel(host->dma_chan);
enable_wp:
lpc32xx_wp_enable(host);
return res;
}
/*
* Remove NAND device.
*/
static void lpc32xx_nand_remove(struct platform_device *pdev)
{
uint32_t tmp;
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
struct nand_chip *chip = &host->nand_chip;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
dma_release_channel(host->dma_chan);
/* Force CE high */
tmp = readl(SLC_CTRL(host->io_base));
tmp &= ~SLCCFG_CE_LOW;
writel(tmp, SLC_CTRL(host->io_base));
lpc32xx_wp_enable(host);
}
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
int ret;
/* Re-enable NAND clock */
ret = clk_prepare_enable(host->clk);
if (ret)
return ret;
/* Fresh init of NAND controller */
lpc32xx_nand_setup(host);
/* Disable write protect */
lpc32xx_wp_disable(host);
return 0;
}
static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
uint32_t tmp;
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
/* Force CE high */
tmp = readl(SLC_CTRL(host->io_base));
tmp &= ~SLCCFG_CE_LOW;
writel(tmp, SLC_CTRL(host->io_base));
/* Enable write protect for safety */
lpc32xx_wp_enable(host);
/* Disable clock */
clk_disable_unprepare(host->clk);
return 0;
}
static const struct of_device_id lpc32xx_nand_match[] = {
{ .compatible = "nxp,lpc3220-slc" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
static struct platform_driver lpc32xx_nand_driver = {
.probe = lpc32xx_nand_probe,
.remove_new = lpc32xx_nand_remove,
.resume = pm_ptr(lpc32xx_nand_resume),
.suspend = pm_ptr(lpc32xx_nand_suspend),
.driver = {
.name = LPC32XX_MODNAME,
.of_match_table = lpc32xx_nand_match,
},
};
module_platform_driver(lpc32xx_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kevin Wells <[email protected]>");
MODULE_AUTHOR("Roland Stigge <[email protected]>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");
| linux-master | drivers/mtd/nand/raw/lpc32xx_slc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2018
* Author: Christophe Kerello <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/of_address.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
/* Bad block marker length */
#define FMC2_BBM_LEN 2
/* ECC step size */
#define FMC2_ECC_STEP_SIZE 512
/* BCHDSRx registers length */
#define FMC2_BCHDSRS_LEN 20
/* HECCR length */
#define FMC2_HECCR_LEN 4
/* Max requests done for a 8k nand page size */
#define FMC2_MAX_SG 16
/* Max chip enable */
#define FMC2_MAX_CE 2
/* Max ECC buffer length */
#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
#define FMC2_TIMEOUT_MS 5000
/* Timings */
#define FMC2_THIZ 1
#define FMC2_TIO 8000
#define FMC2_TSYNC 3000
#define FMC2_PCR_TIMING_MASK 0xf
#define FMC2_PMEM_PATT_TIMING_MASK 0xff
/* FMC2 Controller Registers */
#define FMC2_BCR1 0x0
#define FMC2_PCR 0x80
#define FMC2_SR 0x84
#define FMC2_PMEM 0x88
#define FMC2_PATT 0x8c
#define FMC2_HECCR 0x94
#define FMC2_ISR 0x184
#define FMC2_ICR 0x188
#define FMC2_CSQCR 0x200
#define FMC2_CSQCFGR1 0x204
#define FMC2_CSQCFGR2 0x208
#define FMC2_CSQCFGR3 0x20c
#define FMC2_CSQAR1 0x210
#define FMC2_CSQAR2 0x214
#define FMC2_CSQIER 0x220
#define FMC2_CSQISR 0x224
#define FMC2_CSQICR 0x228
#define FMC2_CSQEMSR 0x230
#define FMC2_BCHIER 0x250
#define FMC2_BCHISR 0x254
#define FMC2_BCHICR 0x258
#define FMC2_BCHPBR1 0x260
#define FMC2_BCHPBR2 0x264
#define FMC2_BCHPBR3 0x268
#define FMC2_BCHPBR4 0x26c
#define FMC2_BCHDSR0 0x27c
#define FMC2_BCHDSR1 0x280
#define FMC2_BCHDSR2 0x284
#define FMC2_BCHDSR3 0x288
#define FMC2_BCHDSR4 0x28c
/* Register: FMC2_BCR1 */
#define FMC2_BCR1_FMC2EN BIT(31)
/* Register: FMC2_PCR */
#define FMC2_PCR_PWAITEN BIT(1)
#define FMC2_PCR_PBKEN BIT(2)
#define FMC2_PCR_PWID GENMASK(5, 4)
#define FMC2_PCR_PWID_BUSWIDTH_8 0
#define FMC2_PCR_PWID_BUSWIDTH_16 1
#define FMC2_PCR_ECCEN BIT(6)
#define FMC2_PCR_ECCALG BIT(8)
#define FMC2_PCR_TCLR GENMASK(12, 9)
#define FMC2_PCR_TCLR_DEFAULT 0xf
#define FMC2_PCR_TAR GENMASK(16, 13)
#define FMC2_PCR_TAR_DEFAULT 0xf
#define FMC2_PCR_ECCSS GENMASK(19, 17)
#define FMC2_PCR_ECCSS_512 1
#define FMC2_PCR_ECCSS_2048 3
#define FMC2_PCR_BCHECC BIT(24)
#define FMC2_PCR_WEN BIT(25)
/* Register: FMC2_SR */
#define FMC2_SR_NWRF BIT(6)
/* Register: FMC2_PMEM */
#define FMC2_PMEM_MEMSET GENMASK(7, 0)
#define FMC2_PMEM_MEMWAIT GENMASK(15, 8)
#define FMC2_PMEM_MEMHOLD GENMASK(23, 16)
#define FMC2_PMEM_MEMHIZ GENMASK(31, 24)
#define FMC2_PMEM_DEFAULT 0x0a0a0a0a
/* Register: FMC2_PATT */
#define FMC2_PATT_ATTSET GENMASK(7, 0)
#define FMC2_PATT_ATTWAIT GENMASK(15, 8)
#define FMC2_PATT_ATTHOLD GENMASK(23, 16)
#define FMC2_PATT_ATTHIZ GENMASK(31, 24)
#define FMC2_PATT_DEFAULT 0x0a0a0a0a
/* Register: FMC2_ISR */
#define FMC2_ISR_IHLF BIT(1)
/* Register: FMC2_ICR */
#define FMC2_ICR_CIHLF BIT(1)
/* Register: FMC2_CSQCR */
#define FMC2_CSQCR_CSQSTART BIT(0)
/* Register: FMC2_CSQCFGR1 */
#define FMC2_CSQCFGR1_CMD2EN BIT(1)
#define FMC2_CSQCFGR1_DMADEN BIT(2)
#define FMC2_CSQCFGR1_ACYNBR GENMASK(6, 4)
#define FMC2_CSQCFGR1_CMD1 GENMASK(15, 8)
#define FMC2_CSQCFGR1_CMD2 GENMASK(23, 16)
#define FMC2_CSQCFGR1_CMD1T BIT(24)
#define FMC2_CSQCFGR1_CMD2T BIT(25)
/* Register: FMC2_CSQCFGR2 */
#define FMC2_CSQCFGR2_SQSDTEN BIT(0)
#define FMC2_CSQCFGR2_RCMD2EN BIT(1)
#define FMC2_CSQCFGR2_DMASEN BIT(2)
#define FMC2_CSQCFGR2_RCMD1 GENMASK(15, 8)
#define FMC2_CSQCFGR2_RCMD2 GENMASK(23, 16)
#define FMC2_CSQCFGR2_RCMD1T BIT(24)
#define FMC2_CSQCFGR2_RCMD2T BIT(25)
/* Register: FMC2_CSQCFGR3 */
#define FMC2_CSQCFGR3_SNBR GENMASK(13, 8)
#define FMC2_CSQCFGR3_AC1T BIT(16)
#define FMC2_CSQCFGR3_AC2T BIT(17)
#define FMC2_CSQCFGR3_AC3T BIT(18)
#define FMC2_CSQCFGR3_AC4T BIT(19)
#define FMC2_CSQCFGR3_AC5T BIT(20)
#define FMC2_CSQCFGR3_SDT BIT(21)
#define FMC2_CSQCFGR3_RAC1T BIT(22)
#define FMC2_CSQCFGR3_RAC2T BIT(23)
/* Register: FMC2_CSQCAR1 */
#define FMC2_CSQCAR1_ADDC1 GENMASK(7, 0)
#define FMC2_CSQCAR1_ADDC2 GENMASK(15, 8)
#define FMC2_CSQCAR1_ADDC3 GENMASK(23, 16)
#define FMC2_CSQCAR1_ADDC4 GENMASK(31, 24)
/* Register: FMC2_CSQCAR2 */
#define FMC2_CSQCAR2_ADDC5 GENMASK(7, 0)
#define FMC2_CSQCAR2_NANDCEN GENMASK(11, 10)
#define FMC2_CSQCAR2_SAO GENMASK(31, 16)
/* Register: FMC2_CSQIER */
#define FMC2_CSQIER_TCIE BIT(0)
/* Register: FMC2_CSQICR */
#define FMC2_CSQICR_CLEAR_IRQ GENMASK(4, 0)
/* Register: FMC2_CSQEMSR */
#define FMC2_CSQEMSR_SEM GENMASK(15, 0)
/* Register: FMC2_BCHIER */
#define FMC2_BCHIER_DERIE BIT(1)
#define FMC2_BCHIER_EPBRIE BIT(4)
/* Register: FMC2_BCHICR */
#define FMC2_BCHICR_CLEAR_IRQ GENMASK(4, 0)
/* Register: FMC2_BCHDSR0 */
#define FMC2_BCHDSR0_DUE BIT(0)
#define FMC2_BCHDSR0_DEF BIT(1)
#define FMC2_BCHDSR0_DEN GENMASK(7, 4)
/* Register: FMC2_BCHDSR1 */
#define FMC2_BCHDSR1_EBP1 GENMASK(12, 0)
#define FMC2_BCHDSR1_EBP2 GENMASK(28, 16)
/* Register: FMC2_BCHDSR2 */
#define FMC2_BCHDSR2_EBP3 GENMASK(12, 0)
#define FMC2_BCHDSR2_EBP4 GENMASK(28, 16)
/* Register: FMC2_BCHDSR3 */
#define FMC2_BCHDSR3_EBP5 GENMASK(12, 0)
#define FMC2_BCHDSR3_EBP6 GENMASK(28, 16)
/* Register: FMC2_BCHDSR4 */
#define FMC2_BCHDSR4_EBP7 GENMASK(12, 0)
#define FMC2_BCHDSR4_EBP8 GENMASK(28, 16)
enum stm32_fmc2_ecc {
FMC2_ECC_HAM = 1,
FMC2_ECC_BCH4 = 4,
FMC2_ECC_BCH8 = 8
};
enum stm32_fmc2_irq_state {
FMC2_IRQ_UNKNOWN = 0,
FMC2_IRQ_BCH,
FMC2_IRQ_SEQ
};
struct stm32_fmc2_timings {
u8 tclr;
u8 tar;
u8 thiz;
u8 twait;
u8 thold_mem;
u8 tset_mem;
u8 thold_att;
u8 tset_att;
};
struct stm32_fmc2_nand {
struct nand_chip chip;
struct gpio_desc *wp_gpio;
struct stm32_fmc2_timings timings;
int ncs;
int cs_used[FMC2_MAX_CE];
};
static inline struct stm32_fmc2_nand *to_fmc2_nand(struct nand_chip *chip)
{
return container_of(chip, struct stm32_fmc2_nand, chip);
}
struct stm32_fmc2_nfc {
struct nand_controller base;
struct stm32_fmc2_nand nand;
struct device *dev;
struct device *cdev;
struct regmap *regmap;
void __iomem *data_base[FMC2_MAX_CE];
void __iomem *cmd_base[FMC2_MAX_CE];
void __iomem *addr_base[FMC2_MAX_CE];
phys_addr_t io_phys_addr;
phys_addr_t data_phys_addr[FMC2_MAX_CE];
struct clk *clk;
u8 irq_state;
struct dma_chan *dma_tx_ch;
struct dma_chan *dma_rx_ch;
struct dma_chan *dma_ecc_ch;
struct sg_table dma_data_sg;
struct sg_table dma_ecc_sg;
u8 *ecc_buf;
int dma_ecc_len;
struct completion complete;
struct completion dma_data_complete;
struct completion dma_ecc_complete;
u8 cs_assigned;
int cs_sel;
};
static inline struct stm32_fmc2_nfc *to_stm32_nfc(struct nand_controller *base)
{
return container_of(base, struct stm32_fmc2_nfc, base);
}
static void stm32_fmc2_nfc_timings_init(struct nand_chip *chip)
{
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
struct stm32_fmc2_timings *timings = &nand->timings;
u32 pmem, patt;
/* Set tclr/tar timings */
regmap_update_bits(nfc->regmap, FMC2_PCR,
FMC2_PCR_TCLR | FMC2_PCR_TAR,
FIELD_PREP(FMC2_PCR_TCLR, timings->tclr) |
FIELD_PREP(FMC2_PCR_TAR, timings->tar));
/* Set tset/twait/thold/thiz timings in common bank */
pmem = FIELD_PREP(FMC2_PMEM_MEMSET, timings->tset_mem);
pmem |= FIELD_PREP(FMC2_PMEM_MEMWAIT, timings->twait);
pmem |= FIELD_PREP(FMC2_PMEM_MEMHOLD, timings->thold_mem);
pmem |= FIELD_PREP(FMC2_PMEM_MEMHIZ, timings->thiz);
regmap_write(nfc->regmap, FMC2_PMEM, pmem);
	/* Set tset/twait/thold/thiz timings in attribute bank */
patt = FIELD_PREP(FMC2_PATT_ATTSET, timings->tset_att);
patt |= FIELD_PREP(FMC2_PATT_ATTWAIT, timings->twait);
patt |= FIELD_PREP(FMC2_PATT_ATTHOLD, timings->thold_att);
patt |= FIELD_PREP(FMC2_PATT_ATTHIZ, timings->thiz);
regmap_write(nfc->regmap, FMC2_PATT, patt);
}
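/*
 * Program the PCR register: ECC algorithm (Hamming or BCH4/BCH8), bus width
 * and ECC sector size for the currently selected chip.
 */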
static void stm32_fmc2_nfc_setup(struct nand_chip *chip)
{
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
u32 pcr = 0, pcr_mask;
/* Configure ECC algorithm (default configuration is Hamming) */
pcr_mask = FMC2_PCR_ECCALG;
pcr_mask |= FMC2_PCR_BCHECC;
if (chip->ecc.strength == FMC2_ECC_BCH8) {
pcr |= FMC2_PCR_ECCALG;
pcr |= FMC2_PCR_BCHECC;
} else if (chip->ecc.strength == FMC2_ECC_BCH4) {
pcr |= FMC2_PCR_ECCALG;
}
/* Set buswidth */
pcr_mask |= FMC2_PCR_PWID;
if (chip->options & NAND_BUSWIDTH_16)
pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16);
/* Set ECC sector size */
pcr_mask |= FMC2_PCR_ECCSS;
pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_512);
regmap_update_bits(nfc->regmap, FMC2_PCR, pcr_mask, pcr);
}
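/*
 * Selecting a new chip select reprograms the controller configuration and
 * timings and updates the DMA slave configuration so data and ECC transfers
 * target that chip's registers.
 */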
static int stm32_fmc2_nfc_select_chip(struct nand_chip *chip, int chipnr)
{
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
struct dma_slave_config dma_cfg;
int ret;
if (nand->cs_used[chipnr] == nfc->cs_sel)
return 0;
nfc->cs_sel = nand->cs_used[chipnr];
stm32_fmc2_nfc_setup(chip);
stm32_fmc2_nfc_timings_init(chip);
if (nfc->dma_tx_ch && nfc->dma_rx_ch) {
memset(&dma_cfg, 0, sizeof(dma_cfg));
dma_cfg.src_addr = nfc->data_phys_addr[nfc->cs_sel];
dma_cfg.dst_addr = nfc->data_phys_addr[nfc->cs_sel];
dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_cfg.src_maxburst = 32;
dma_cfg.dst_maxburst = 32;
ret = dmaengine_slave_config(nfc->dma_tx_ch, &dma_cfg);
if (ret) {
dev_err(nfc->dev, "tx DMA engine slave config failed\n");
return ret;
}
ret = dmaengine_slave_config(nfc->dma_rx_ch, &dma_cfg);
if (ret) {
dev_err(nfc->dev, "rx DMA engine slave config failed\n");
return ret;
}
}
if (nfc->dma_ecc_ch) {
/*
* Hamming: we read HECCR register
* BCH4/BCH8: we read BCHDSRSx registers
*/
memset(&dma_cfg, 0, sizeof(dma_cfg));
dma_cfg.src_addr = nfc->io_phys_addr;
dma_cfg.src_addr += chip->ecc.strength == FMC2_ECC_HAM ?
FMC2_HECCR : FMC2_BCHDSR0;
dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
ret = dmaengine_slave_config(nfc->dma_ecc_ch, &dma_cfg);
if (ret) {
dev_err(nfc->dev, "ECC DMA engine slave config failed\n");
return ret;
}
/* Calculate ECC length needed for one sector */
nfc->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ?
FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN;
}
return 0;
}
static void stm32_fmc2_nfc_set_buswidth_16(struct stm32_fmc2_nfc *nfc, bool set)
{
u32 pcr;
pcr = set ? FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16) :
FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_8);
regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_PWID, pcr);
}
static void stm32_fmc2_nfc_set_ecc(struct stm32_fmc2_nfc *nfc, bool enable)
{
regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_ECCEN,
enable ? FMC2_PCR_ECCEN : 0);
}
static void stm32_fmc2_nfc_enable_seq_irq(struct stm32_fmc2_nfc *nfc)
{
nfc->irq_state = FMC2_IRQ_SEQ;
regmap_update_bits(nfc->regmap, FMC2_CSQIER,
FMC2_CSQIER_TCIE, FMC2_CSQIER_TCIE);
}
static void stm32_fmc2_nfc_disable_seq_irq(struct stm32_fmc2_nfc *nfc)
{
regmap_update_bits(nfc->regmap, FMC2_CSQIER, FMC2_CSQIER_TCIE, 0);
nfc->irq_state = FMC2_IRQ_UNKNOWN;
}
static void stm32_fmc2_nfc_clear_seq_irq(struct stm32_fmc2_nfc *nfc)
{
regmap_write(nfc->regmap, FMC2_CSQICR, FMC2_CSQICR_CLEAR_IRQ);
}
static void stm32_fmc2_nfc_enable_bch_irq(struct stm32_fmc2_nfc *nfc, int mode)
{
nfc->irq_state = FMC2_IRQ_BCH;
if (mode == NAND_ECC_WRITE)
regmap_update_bits(nfc->regmap, FMC2_BCHIER,
FMC2_BCHIER_EPBRIE, FMC2_BCHIER_EPBRIE);
else
regmap_update_bits(nfc->regmap, FMC2_BCHIER,
FMC2_BCHIER_DERIE, FMC2_BCHIER_DERIE);
}
static void stm32_fmc2_nfc_disable_bch_irq(struct stm32_fmc2_nfc *nfc)
{
regmap_update_bits(nfc->regmap, FMC2_BCHIER,
FMC2_BCHIER_DERIE | FMC2_BCHIER_EPBRIE, 0);
nfc->irq_state = FMC2_IRQ_UNKNOWN;
}
static void stm32_fmc2_nfc_clear_bch_irq(struct stm32_fmc2_nfc *nfc)
{
regmap_write(nfc->regmap, FMC2_BCHICR, FMC2_BCHICR_CLEAR_IRQ);
}
/*
* Enable ECC logic and reset syndrome/parity bits previously calculated
* Syndrome/parity bits are cleared by setting the ECCEN bit to 0
*/
static void stm32_fmc2_nfc_hwctl(struct nand_chip *chip, int mode)
{
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
stm32_fmc2_nfc_set_ecc(nfc, false);
if (chip->ecc.strength != FMC2_ECC_HAM) {
regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_WEN,
mode == NAND_ECC_WRITE ? FMC2_PCR_WEN : 0);
reinit_completion(&nfc->complete);
stm32_fmc2_nfc_clear_bch_irq(nfc);
stm32_fmc2_nfc_enable_bch_irq(nfc, mode);
}
stm32_fmc2_nfc_set_ecc(nfc, true);
}
/*
* ECC Hamming calculation
* ECC is 3 bytes for 512 bytes of data (supports correction of at most
* 1 bit)
*/
static void stm32_fmc2_nfc_ham_set_ecc(const u32 ecc_sta, u8 *ecc)
{
ecc[0] = ecc_sta;
ecc[1] = ecc_sta >> 8;
ecc[2] = ecc_sta >> 16;
}
static int stm32_fmc2_nfc_ham_calculate(struct nand_chip *chip, const u8 *data,
u8 *ecc)
{
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
u32 sr, heccr;
int ret;
ret = regmap_read_poll_timeout(nfc->regmap, FMC2_SR, sr,
sr & FMC2_SR_NWRF, 1,
1000 * FMC2_TIMEOUT_MS);
if (ret) {
dev_err(nfc->dev, "ham timeout\n");
return ret;
}
regmap_read(nfc->regmap, FMC2_HECCR, &heccr);
stm32_fmc2_nfc_ham_set_ecc(heccr, ecc);
stm32_fmc2_nfc_set_ecc(nfc, false);
return 0;
}
static int stm32_fmc2_nfc_ham_correct(struct nand_chip *chip, u8 *dat,
u8 *read_ecc, u8 *calc_ecc)
{
u8 bit_position = 0, b0, b1, b2;
u32 byte_addr = 0, b;
u32 i, shifting = 1;
/* Determine which bit and byte are faulty (if any) */
b0 = read_ecc[0] ^ calc_ecc[0];
b1 = read_ecc[1] ^ calc_ecc[1];
b2 = read_ecc[2] ^ calc_ecc[2];
b = b0 | (b1 << 8) | (b2 << 16);
/* No errors */
if (likely(!b))
return 0;
/* Calculate bit position */
for (i = 0; i < 3; i++) {
switch (b % 4) {
case 2:
bit_position += shifting;
break;
case 1:
break;
default:
return -EBADMSG;
}
shifting <<= 1;
b >>= 2;
}
/* Calculate byte position */
shifting = 1;
for (i = 0; i < 9; i++) {
switch (b % 4) {
case 2:
byte_addr += shifting;
break;
case 1:
break;
default:
return -EBADMSG;
}
shifting <<= 1;
b >>= 2;
}
/* Flip the bit */
dat[byte_addr] ^= (1 << bit_position);
return 1;
}
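/*
 * Illustrative decode example (not part of the upstream driver): if
 * read_ecc ^ calc_ecc yields {0x9A, 0x59, 0x55}, the three low-order bit
 * pairs (10, 10, 01) decode to bit_position = 0b011 = 3 and the nine
 * following pairs (10, 01, 10, then six times 01) decode to
 * byte_addr = 0b000000101 = 5, so the routine above flips bit 3 of dat[5]
 * and reports one corrected bitflip.
 */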
/*
* ECC BCH calculation and correction
* ECC is 7/13 bytes for 512 bytes of data (supports correction of at most
* 4/8 bits)
*/
static int stm32_fmc2_nfc_bch_calculate(struct nand_chip *chip, const u8 *data,
u8 *ecc)
{
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
u32 bchpbr;
/* Wait until the BCH code is ready */
if (!wait_for_completion_timeout(&nfc->complete,
msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
dev_err(nfc->dev, "bch timeout\n");
stm32_fmc2_nfc_disable_bch_irq(nfc);
return -ETIMEDOUT;
}
/* Read parity bits */
regmap_read(nfc->regmap, FMC2_BCHPBR1, &bchpbr);
ecc[0] = bchpbr;
ecc[1] = bchpbr >> 8;
ecc[2] = bchpbr >> 16;
ecc[3] = bchpbr >> 24;
regmap_read(nfc->regmap, FMC2_BCHPBR2, &bchpbr);
ecc[4] = bchpbr;
ecc[5] = bchpbr >> 8;
ecc[6] = bchpbr >> 16;
if (chip->ecc.strength == FMC2_ECC_BCH8) {
ecc[7] = bchpbr >> 24;
regmap_read(nfc->regmap, FMC2_BCHPBR3, &bchpbr);
ecc[8] = bchpbr;
ecc[9] = bchpbr >> 8;
ecc[10] = bchpbr >> 16;
ecc[11] = bchpbr >> 24;
regmap_read(nfc->regmap, FMC2_BCHPBR4, &bchpbr);
ecc[12] = bchpbr;
}
stm32_fmc2_nfc_set_ecc(nfc, false);
return 0;
}
static int stm32_fmc2_nfc_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
{
u32 bchdsr0 = ecc_sta[0];
u32 bchdsr1 = ecc_sta[1];
u32 bchdsr2 = ecc_sta[2];
u32 bchdsr3 = ecc_sta[3];
u32 bchdsr4 = ecc_sta[4];
u16 pos[8];
int i, den;
unsigned int nb_errs = 0;
/* No errors found */
if (likely(!(bchdsr0 & FMC2_BCHDSR0_DEF)))
return 0;
/* Too many errors detected */
if (unlikely(bchdsr0 & FMC2_BCHDSR0_DUE))
return -EBADMSG;
pos[0] = FIELD_GET(FMC2_BCHDSR1_EBP1, bchdsr1);
pos[1] = FIELD_GET(FMC2_BCHDSR1_EBP2, bchdsr1);
pos[2] = FIELD_GET(FMC2_BCHDSR2_EBP3, bchdsr2);
pos[3] = FIELD_GET(FMC2_BCHDSR2_EBP4, bchdsr2);
pos[4] = FIELD_GET(FMC2_BCHDSR3_EBP5, bchdsr3);
pos[5] = FIELD_GET(FMC2_BCHDSR3_EBP6, bchdsr3);
pos[6] = FIELD_GET(FMC2_BCHDSR4_EBP7, bchdsr4);
pos[7] = FIELD_GET(FMC2_BCHDSR4_EBP8, bchdsr4);
den = FIELD_GET(FMC2_BCHDSR0_DEN, bchdsr0);
for (i = 0; i < den; i++) {
if (pos[i] < eccsize * 8) {
change_bit(pos[i], (unsigned long *)dat);
nb_errs++;
}
}
return nb_errs;
}
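/*
 * Illustrative note on the eccsize * 8 bound above: with 512-byte sectors
 * only bit positions 0..4095 fall inside the data payload. A position
 * reported as, say, 4100 lies in the ECC bytes themselves and is neither
 * flipped in the data buffer nor counted in nb_errs.
 */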
static int stm32_fmc2_nfc_bch_correct(struct nand_chip *chip, u8 *dat,
u8 *read_ecc, u8 *calc_ecc)
{
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
u32 ecc_sta[5];
/* Wait until the BCH decoder status is ready */
if (!wait_for_completion_timeout(&nfc->complete,
msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
dev_err(nfc->dev, "bch timeout\n");
stm32_fmc2_nfc_disable_bch_irq(nfc);
return -ETIMEDOUT;
}
regmap_bulk_read(nfc->regmap, FMC2_BCHDSR0, ecc_sta, 5);
stm32_fmc2_nfc_set_ecc(nfc, false);
return stm32_fmc2_nfc_bch_decode(chip->ecc.size, dat, ecc_sta);
}
static int stm32_fmc2_nfc_read_page(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret, i, s, stat, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
int eccstrength = chip->ecc.strength;
u8 *p = buf;
u8 *ecc_calc = chip->ecc.calc_buf;
u8 *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0;
ret = nand_read_page_op(chip, page, 0, NULL, 0);
if (ret)
return ret;
for (i = mtd->writesize + FMC2_BBM_LEN, s = 0; s < eccsteps;
s++, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(chip, NAND_ECC_READ);
/* Read the nand page sector (512 bytes) */
ret = nand_change_read_column_op(chip, s * eccsize, p,
eccsize, false);
if (ret)
return ret;
/* Read the corresponding ECC bytes */
ret = nand_change_read_column_op(chip, i, ecc_code,
eccbytes, false);
if (ret)
return ret;
/* Correct the data */
stat = chip->ecc.correct(chip, p, ecc_code, ecc_calc);
if (stat == -EBADMSG)
/* Check for empty pages with bitflips */
stat = nand_check_erased_ecc_chunk(p, eccsize,
ecc_code, eccbytes,
NULL, 0,
eccstrength);
if (stat < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += stat;
max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
}
/* Read oob */
if (oob_required) {
ret = nand_change_read_column_op(chip, mtd->writesize,
chip->oob_poi, mtd->oobsize,
false);
if (ret)
return ret;
}
return max_bitflips;
}
/* Sequencer read/write configuration */
static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page,
int raw, bool write_data)
{
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
u32 ecc_offset = mtd->writesize + FMC2_BBM_LEN;
/*
* cfg[0] => csqcfgr1, cfg[1] => csqcfgr2, cfg[2] => csqcfgr3
* cfg[3] => csqar1, cfg[4] => csqar2
*/
u32 cfg[5];
regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_WEN,
write_data ? FMC2_PCR_WEN : 0);
/*
* - Set Program Page/Page Read command
* - Enable DMA request data
* - Set timings
*/
cfg[0] = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T;
if (write_data)
cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_SEQIN);
else
cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_READ0) |
FMC2_CSQCFGR1_CMD2EN |
FIELD_PREP(FMC2_CSQCFGR1_CMD2, NAND_CMD_READSTART) |
FMC2_CSQCFGR1_CMD2T;
/*
* - Set Random Data Input/Random Data Read command
* - Enable the sequencer to access the Spare data area
* - Enable DMA request status decoding for read
* - Set timings
*/
if (write_data)
cfg[1] = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDIN);
else
cfg[1] = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDOUT) |
FMC2_CSQCFGR2_RCMD2EN |
FIELD_PREP(FMC2_CSQCFGR2_RCMD2, NAND_CMD_RNDOUTSTART) |
FMC2_CSQCFGR2_RCMD1T |
FMC2_CSQCFGR2_RCMD2T;
if (!raw) {
cfg[1] |= write_data ? 0 : FMC2_CSQCFGR2_DMASEN;
cfg[1] |= FMC2_CSQCFGR2_SQSDTEN;
}
/*
* - Set the number of sectors to be written
* - Set timings
*/
cfg[2] = FIELD_PREP(FMC2_CSQCFGR3_SNBR, chip->ecc.steps - 1);
if (write_data) {
cfg[2] |= FMC2_CSQCFGR3_RAC2T;
if (chip->options & NAND_ROW_ADDR_3)
cfg[2] |= FMC2_CSQCFGR3_AC5T;
else
cfg[2] |= FMC2_CSQCFGR3_AC4T;
}
/*
* Set the first four address cycles
* Byte 1 and byte 2 => column, we start at 0x0
* Byte 3 and byte 4 => page
*/
cfg[3] = FIELD_PREP(FMC2_CSQCAR1_ADDC3, page);
cfg[3] |= FIELD_PREP(FMC2_CSQCAR1_ADDC4, page >> 8);
/*
* - Set chip enable number
* - Set ECC byte offset in the spare area
* - Calculate the number of address cycles to be issued
* - Set byte 5 of address cycle if needed
*/
cfg[4] = FIELD_PREP(FMC2_CSQCAR2_NANDCEN, nfc->cs_sel);
if (chip->options & NAND_BUSWIDTH_16)
cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset >> 1);
else
cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset);
if (chip->options & NAND_ROW_ADDR_3) {
cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 5);
cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_ADDC5, page >> 16);
} else {
cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 4);
}
regmap_bulk_write(nfc->regmap, FMC2_CSQCFGR1, cfg, 5);
}
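/*
 * Worked example (illustrative only, assuming byte-wide ADDCx fields): a
 * read of page 0x10203 on a device needing three row cycles programs
 * ACYNBR = 5, ADDC3 = 0x03, ADDC4 = 0x02 and ADDC5 = 0x01, while the two
 * column cycles (ADDC1/ADDC2) stay at 0 since the access starts at
 * column 0.
 */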
static void stm32_fmc2_nfc_dma_callback(void *arg)
{
complete((struct completion *)arg);
}
/* Read/write data from/to a page */
static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
int raw, bool write_data)
{
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
struct dma_async_tx_descriptor *desc_data, *desc_ecc;
struct scatterlist *sg;
struct dma_chan *dma_ch = nfc->dma_rx_ch;
enum dma_data_direction dma_data_dir = DMA_FROM_DEVICE;
enum dma_transfer_direction dma_transfer_dir = DMA_DEV_TO_MEM;
int eccsteps = chip->ecc.steps;
int eccsize = chip->ecc.size;
unsigned long timeout = msecs_to_jiffies(FMC2_TIMEOUT_MS);
const u8 *p = buf;
int s, ret;
/* Configure DMA data */
if (write_data) {
dma_data_dir = DMA_TO_DEVICE;
dma_transfer_dir = DMA_MEM_TO_DEV;
dma_ch = nfc->dma_tx_ch;
}
for_each_sg(nfc->dma_data_sg.sgl, sg, eccsteps, s) {
sg_set_buf(sg, p, eccsize);
p += eccsize;
}
ret = dma_map_sg(nfc->dev, nfc->dma_data_sg.sgl,
eccsteps, dma_data_dir);
if (!ret)
return -EIO;
desc_data = dmaengine_prep_slave_sg(dma_ch, nfc->dma_data_sg.sgl,
eccsteps, dma_transfer_dir,
DMA_PREP_INTERRUPT);
if (!desc_data) {
ret = -ENOMEM;
goto err_unmap_data;
}
reinit_completion(&nfc->dma_data_complete);
reinit_completion(&nfc->complete);
desc_data->callback = stm32_fmc2_nfc_dma_callback;
desc_data->callback_param = &nfc->dma_data_complete;
ret = dma_submit_error(dmaengine_submit(desc_data));
if (ret)
goto err_unmap_data;
dma_async_issue_pending(dma_ch);
if (!write_data && !raw) {
/* Configure DMA ECC status */
p = nfc->ecc_buf;
for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) {
sg_set_buf(sg, p, nfc->dma_ecc_len);
p += nfc->dma_ecc_len;
}
ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
eccsteps, dma_data_dir);
if (!ret) {
ret = -EIO;
goto err_unmap_data;
}
desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch,
nfc->dma_ecc_sg.sgl,
eccsteps, dma_transfer_dir,
DMA_PREP_INTERRUPT);
if (!desc_ecc) {
ret = -ENOMEM;
goto err_unmap_ecc;
}
reinit_completion(&nfc->dma_ecc_complete);
desc_ecc->callback = stm32_fmc2_nfc_dma_callback;
desc_ecc->callback_param = &nfc->dma_ecc_complete;
ret = dma_submit_error(dmaengine_submit(desc_ecc));
if (ret)
goto err_unmap_ecc;
dma_async_issue_pending(nfc->dma_ecc_ch);
}
stm32_fmc2_nfc_clear_seq_irq(nfc);
stm32_fmc2_nfc_enable_seq_irq(nfc);
/* Start the transfer */
regmap_update_bits(nfc->regmap, FMC2_CSQCR,
FMC2_CSQCR_CSQSTART, FMC2_CSQCR_CSQSTART);
/* Wait end of sequencer transfer */
if (!wait_for_completion_timeout(&nfc->complete, timeout)) {
dev_err(nfc->dev, "seq timeout\n");
stm32_fmc2_nfc_disable_seq_irq(nfc);
dmaengine_terminate_all(dma_ch);
if (!write_data && !raw)
dmaengine_terminate_all(nfc->dma_ecc_ch);
ret = -ETIMEDOUT;
goto err_unmap_ecc;
}
/* Wait DMA data transfer completion */
if (!wait_for_completion_timeout(&nfc->dma_data_complete, timeout)) {
dev_err(nfc->dev, "data DMA timeout\n");
dmaengine_terminate_all(dma_ch);
ret = -ETIMEDOUT;
}
/* Wait DMA ECC transfer completion */
if (!write_data && !raw) {
if (!wait_for_completion_timeout(&nfc->dma_ecc_complete,
timeout)) {
dev_err(nfc->dev, "ECC DMA timeout\n");
dmaengine_terminate_all(nfc->dma_ecc_ch);
ret = -ETIMEDOUT;
}
}
err_unmap_ecc:
if (!write_data && !raw)
dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
eccsteps, dma_data_dir);
err_unmap_data:
dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir);
return ret;
}
static int stm32_fmc2_nfc_seq_write(struct nand_chip *chip, const u8 *buf,
int oob_required, int page, int raw)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
/* Configure the sequencer */
stm32_fmc2_nfc_rw_page_init(chip, page, raw, true);
/* Write the page */
ret = stm32_fmc2_nfc_xfer(chip, buf, raw, true);
if (ret)
return ret;
/* Write oob */
if (oob_required) {
ret = nand_change_write_column_op(chip, mtd->writesize,
chip->oob_poi, mtd->oobsize,
false);
if (ret)
return ret;
}
return nand_prog_page_end_op(chip);
}
static int stm32_fmc2_nfc_seq_write_page(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
int ret;
ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
if (ret)
return ret;
return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, false);
}
static int stm32_fmc2_nfc_seq_write_page_raw(struct nand_chip *chip,
const u8 *buf, int oob_required,
int page)
{
int ret;
ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
if (ret)
return ret;
return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, true);
}
/* Get a status indicating which sectors have errors */
static u16 stm32_fmc2_nfc_get_mapping_status(struct stm32_fmc2_nfc *nfc)
{
u32 csqemsr;
regmap_read(nfc->regmap, FMC2_CSQEMSR, &csqemsr);
return FIELD_GET(FMC2_CSQEMSR_SEM, csqemsr);
}
static int stm32_fmc2_nfc_seq_correct(struct nand_chip *chip, u8 *dat,
u8 *read_ecc, u8 *calc_ecc)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
int eccstrength = chip->ecc.strength;
int i, s, eccsize = chip->ecc.size;
u32 *ecc_sta = (u32 *)nfc->ecc_buf;
u16 sta_map = stm32_fmc2_nfc_get_mapping_status(nfc);
unsigned int max_bitflips = 0;
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, dat += eccsize) {
int stat = 0;
if (eccstrength == FMC2_ECC_HAM) {
/* Ecc_sta = FMC2_HECCR */
if (sta_map & BIT(s)) {
stm32_fmc2_nfc_ham_set_ecc(*ecc_sta,
&calc_ecc[i]);
stat = stm32_fmc2_nfc_ham_correct(chip, dat,
&read_ecc[i],
&calc_ecc[i]);
}
ecc_sta++;
} else {
/*
* Ecc_sta[0] = FMC2_BCHDSR0
* Ecc_sta[1] = FMC2_BCHDSR1
* Ecc_sta[2] = FMC2_BCHDSR2
* Ecc_sta[3] = FMC2_BCHDSR3
* Ecc_sta[4] = FMC2_BCHDSR4
*/
if (sta_map & BIT(s))
stat = stm32_fmc2_nfc_bch_decode(eccsize, dat,
ecc_sta);
ecc_sta += 5;
}
if (stat == -EBADMSG)
/* Check for empty pages with bitflips */
stat = nand_check_erased_ecc_chunk(dat, eccsize,
&read_ecc[i],
eccbytes,
NULL, 0,
eccstrength);
if (stat < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += stat;
max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
}
return max_bitflips;
}
static int stm32_fmc2_nfc_seq_read_page(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
u8 *ecc_calc = chip->ecc.calc_buf;
u8 *ecc_code = chip->ecc.code_buf;
u16 sta_map;
int ret;
ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
if (ret)
return ret;
/* Configure the sequencer */
stm32_fmc2_nfc_rw_page_init(chip, page, 0, false);
/* Read the page */
ret = stm32_fmc2_nfc_xfer(chip, buf, 0, false);
if (ret)
return ret;
sta_map = stm32_fmc2_nfc_get_mapping_status(nfc);
/* Check if any errors happened */
if (likely(!sta_map)) {
if (oob_required)
return nand_change_read_column_op(chip, mtd->writesize,
chip->oob_poi,
mtd->oobsize, false);
return 0;
}
/* Read oob */
ret = nand_change_read_column_op(chip, mtd->writesize,
chip->oob_poi, mtd->oobsize, false);
if (ret)
return ret;
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
/* Correct data */
return chip->ecc.correct(chip, buf, ecc_code, ecc_calc);
}
static int stm32_fmc2_nfc_seq_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
if (ret)
return ret;
/* Configure the sequencer */
stm32_fmc2_nfc_rw_page_init(chip, page, 1, false);
/* Read the page */
ret = stm32_fmc2_nfc_xfer(chip, buf, 1, false);
if (ret)
return ret;
/* Read oob */
if (oob_required)
return nand_change_read_column_op(chip, mtd->writesize,
chip->oob_poi, mtd->oobsize,
false);
return 0;
}
static irqreturn_t stm32_fmc2_nfc_irq(int irq, void *dev_id)
{
struct stm32_fmc2_nfc *nfc = (struct stm32_fmc2_nfc *)dev_id;
if (nfc->irq_state == FMC2_IRQ_SEQ)
/* Sequencer is used */
stm32_fmc2_nfc_disable_seq_irq(nfc);
else if (nfc->irq_state == FMC2_IRQ_BCH)
/* BCH is used */
stm32_fmc2_nfc_disable_bch_irq(nfc);
complete(&nfc->complete);
return IRQ_HANDLED;
}
static void stm32_fmc2_nfc_read_data(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit)
{
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
void __iomem *io_addr_r = nfc->data_base[nfc->cs_sel];
if (force_8bit && chip->options & NAND_BUSWIDTH_16)
/* Reconfigure bus width to 8-bit */
stm32_fmc2_nfc_set_buswidth_16(nfc, false);
if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
*(u8 *)buf = readb_relaxed(io_addr_r);
buf += sizeof(u8);
len -= sizeof(u8);
}
if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
len >= sizeof(u16)) {
*(u16 *)buf = readw_relaxed(io_addr_r);
buf += sizeof(u16);
len -= sizeof(u16);
}
}
/* Buf is aligned */
while (len >= sizeof(u32)) {
*(u32 *)buf = readl_relaxed(io_addr_r);
buf += sizeof(u32);
len -= sizeof(u32);
}
/* Read remaining bytes */
if (len >= sizeof(u16)) {
*(u16 *)buf = readw_relaxed(io_addr_r);
buf += sizeof(u16);
len -= sizeof(u16);
}
if (len)
*(u8 *)buf = readb_relaxed(io_addr_r);
if (force_8bit && chip->options & NAND_BUSWIDTH_16)
/* Reconfigure bus width to 16-bit */
stm32_fmc2_nfc_set_buswidth_16(nfc, true);
}
static void stm32_fmc2_nfc_write_data(struct nand_chip *chip, const void *buf,
unsigned int len, bool force_8bit)
{
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
void __iomem *io_addr_w = nfc->data_base[nfc->cs_sel];
if (force_8bit && chip->options & NAND_BUSWIDTH_16)
/* Reconfigure bus width to 8-bit */
stm32_fmc2_nfc_set_buswidth_16(nfc, false);
if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
writeb_relaxed(*(u8 *)buf, io_addr_w);
buf += sizeof(u8);
len -= sizeof(u8);
}
if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
len >= sizeof(u16)) {
writew_relaxed(*(u16 *)buf, io_addr_w);
buf += sizeof(u16);
len -= sizeof(u16);
}
}
/* Buf is aligned */
while (len >= sizeof(u32)) {
writel_relaxed(*(u32 *)buf, io_addr_w);
buf += sizeof(u32);
len -= sizeof(u32);
}
/* Write remaining bytes */
if (len >= sizeof(u16)) {
writew_relaxed(*(u16 *)buf, io_addr_w);
buf += sizeof(u16);
len -= sizeof(u16);
}
if (len)
writeb_relaxed(*(u8 *)buf, io_addr_w);
if (force_8bit && chip->options & NAND_BUSWIDTH_16)
/* Reconfigure bus width to 16-bit */
stm32_fmc2_nfc_set_buswidth_16(nfc, true);
}
static int stm32_fmc2_nfc_waitrdy(struct nand_chip *chip,
unsigned long timeout_ms)
{
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
const struct nand_sdr_timings *timings;
u32 isr, sr;
/* Check that there are no pending requests to the NAND flash */
if (regmap_read_poll_timeout(nfc->regmap, FMC2_SR, sr,
sr & FMC2_SR_NWRF, 1,
1000 * FMC2_TIMEOUT_MS))
dev_warn(nfc->dev, "Waitrdy timeout\n");
/* Wait tWB before the R/B# signal goes low */
timings = nand_get_sdr_timings(nand_get_interface_config(chip));
ndelay(PSEC_TO_NSEC(timings->tWB_max));
/* R/B# signal is low, clear high level flag */
regmap_write(nfc->regmap, FMC2_ICR, FMC2_ICR_CIHLF);
/* Wait until the R/B# signal is high */
return regmap_read_poll_timeout(nfc->regmap, FMC2_ISR, isr,
isr & FMC2_ISR_IHLF, 5,
1000 * FMC2_TIMEOUT_MS);
}
static int stm32_fmc2_nfc_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
const struct nand_op_instr *instr = NULL;
unsigned int op_id, i, timeout;
int ret;
if (check_only)
return 0;
ret = stm32_fmc2_nfc_select_chip(chip, op->cs);
if (ret)
return ret;
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];
switch (instr->type) {
case NAND_OP_CMD_INSTR:
writeb_relaxed(instr->ctx.cmd.opcode,
nfc->cmd_base[nfc->cs_sel]);
break;
case NAND_OP_ADDR_INSTR:
for (i = 0; i < instr->ctx.addr.naddrs; i++)
writeb_relaxed(instr->ctx.addr.addrs[i],
nfc->addr_base[nfc->cs_sel]);
break;
case NAND_OP_DATA_IN_INSTR:
stm32_fmc2_nfc_read_data(chip, instr->ctx.data.buf.in,
instr->ctx.data.len,
instr->ctx.data.force_8bit);
break;
case NAND_OP_DATA_OUT_INSTR:
stm32_fmc2_nfc_write_data(chip, instr->ctx.data.buf.out,
instr->ctx.data.len,
instr->ctx.data.force_8bit);
break;
case NAND_OP_WAITRDY_INSTR:
timeout = instr->ctx.waitrdy.timeout_ms;
ret = stm32_fmc2_nfc_waitrdy(chip, timeout);
break;
}
}
return ret;
}
static void stm32_fmc2_nfc_init(struct stm32_fmc2_nfc *nfc)
{
u32 pcr;
regmap_read(nfc->regmap, FMC2_PCR, &pcr);
/* Set CS used to undefined */
nfc->cs_sel = -1;
/* Enable wait feature and nand flash memory bank */
pcr |= FMC2_PCR_PWAITEN;
pcr |= FMC2_PCR_PBKEN;
/* Set buswidth to 8-bit mode for identification */
pcr &= ~FMC2_PCR_PWID;
/* ECC logic is disabled */
pcr &= ~FMC2_PCR_ECCEN;
/* Default mode */
pcr &= ~FMC2_PCR_ECCALG;
pcr &= ~FMC2_PCR_BCHECC;
pcr &= ~FMC2_PCR_WEN;
/* Set default ECC sector size */
pcr &= ~FMC2_PCR_ECCSS;
pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_2048);
/* Set default tclr/tar timings */
pcr &= ~FMC2_PCR_TCLR;
pcr |= FIELD_PREP(FMC2_PCR_TCLR, FMC2_PCR_TCLR_DEFAULT);
pcr &= ~FMC2_PCR_TAR;
pcr |= FIELD_PREP(FMC2_PCR_TAR, FMC2_PCR_TAR_DEFAULT);
/* Enable FMC2 controller */
if (nfc->dev == nfc->cdev)
regmap_update_bits(nfc->regmap, FMC2_BCR1,
FMC2_BCR1_FMC2EN, FMC2_BCR1_FMC2EN);
regmap_write(nfc->regmap, FMC2_PCR, pcr);
regmap_write(nfc->regmap, FMC2_PMEM, FMC2_PMEM_DEFAULT);
regmap_write(nfc->regmap, FMC2_PATT, FMC2_PATT_DEFAULT);
}
static void stm32_fmc2_nfc_calc_timings(struct nand_chip *chip,
const struct nand_sdr_timings *sdrt)
{
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
struct stm32_fmc2_timings *tims = &nand->timings;
unsigned long hclk = clk_get_rate(nfc->clk);
unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000);
unsigned long timing, tar, tclr, thiz, twait;
unsigned long tset_mem, tset_att, thold_mem, thold_att;
tar = max_t(unsigned long, hclkp, sdrt->tAR_min);
timing = DIV_ROUND_UP(tar, hclkp) - 1;
tims->tar = min_t(unsigned long, timing, FMC2_PCR_TIMING_MASK);
tclr = max_t(unsigned long, hclkp, sdrt->tCLR_min);
timing = DIV_ROUND_UP(tclr, hclkp) - 1;
tims->tclr = min_t(unsigned long, timing, FMC2_PCR_TIMING_MASK);
tims->thiz = FMC2_THIZ;
thiz = (tims->thiz + 1) * hclkp;
/*
* tWAIT > tRP
* tWAIT > tWP
* tWAIT > tREA + tIO
*/
twait = max_t(unsigned long, hclkp, sdrt->tRP_min);
twait = max_t(unsigned long, twait, sdrt->tWP_min);
twait = max_t(unsigned long, twait, sdrt->tREA_max + FMC2_TIO);
timing = DIV_ROUND_UP(twait, hclkp);
tims->twait = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
/*
* tSETUP_MEM > tCS - tWAIT
* tSETUP_MEM > tALS - tWAIT
* tSETUP_MEM > tDS - (tWAIT - tHIZ)
*/
tset_mem = hclkp;
if (sdrt->tCS_min > twait && (tset_mem < sdrt->tCS_min - twait))
tset_mem = sdrt->tCS_min - twait;
if (sdrt->tALS_min > twait && (tset_mem < sdrt->tALS_min - twait))
tset_mem = sdrt->tALS_min - twait;
if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
(tset_mem < sdrt->tDS_min - (twait - thiz)))
tset_mem = sdrt->tDS_min - (twait - thiz);
timing = DIV_ROUND_UP(tset_mem, hclkp);
tims->tset_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
/*
* tHOLD_MEM > tCH
* tHOLD_MEM > tREH - tSETUP_MEM
* tHOLD_MEM > max(tRC, tWC) - (tSETUP_MEM + tWAIT)
*/
thold_mem = max_t(unsigned long, hclkp, sdrt->tCH_min);
if (sdrt->tREH_min > tset_mem &&
(thold_mem < sdrt->tREH_min - tset_mem))
thold_mem = sdrt->tREH_min - tset_mem;
if ((sdrt->tRC_min > tset_mem + twait) &&
(thold_mem < sdrt->tRC_min - (tset_mem + twait)))
thold_mem = sdrt->tRC_min - (tset_mem + twait);
if ((sdrt->tWC_min > tset_mem + twait) &&
(thold_mem < sdrt->tWC_min - (tset_mem + twait)))
thold_mem = sdrt->tWC_min - (tset_mem + twait);
timing = DIV_ROUND_UP(thold_mem, hclkp);
tims->thold_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
/*
* tSETUP_ATT > tCS - tWAIT
* tSETUP_ATT > tCLS - tWAIT
* tSETUP_ATT > tALS - tWAIT
* tSETUP_ATT > tRHW - tHOLD_MEM
* tSETUP_ATT > tDS - (tWAIT - tHIZ)
*/
tset_att = hclkp;
if (sdrt->tCS_min > twait && (tset_att < sdrt->tCS_min - twait))
tset_att = sdrt->tCS_min - twait;
if (sdrt->tCLS_min > twait && (tset_att < sdrt->tCLS_min - twait))
tset_att = sdrt->tCLS_min - twait;
if (sdrt->tALS_min > twait && (tset_att < sdrt->tALS_min - twait))
tset_att = sdrt->tALS_min - twait;
if (sdrt->tRHW_min > thold_mem &&
(tset_att < sdrt->tRHW_min - thold_mem))
tset_att = sdrt->tRHW_min - thold_mem;
if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
(tset_att < sdrt->tDS_min - (twait - thiz)))
tset_att = sdrt->tDS_min - (twait - thiz);
timing = DIV_ROUND_UP(tset_att, hclkp);
tims->tset_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
/*
* tHOLD_ATT > tALH
* tHOLD_ATT > tCH
* tHOLD_ATT > tCLH
* tHOLD_ATT > tCOH
* tHOLD_ATT > tDH
* tHOLD_ATT > tWB + tIO + tSYNC - tSETUP_MEM
* tHOLD_ATT > tADL - tSETUP_MEM
* tHOLD_ATT > tWH - tSETUP_MEM
* tHOLD_ATT > tWHR - tSETUP_MEM
* tHOLD_ATT > tRC - (tSETUP_ATT + tWAIT)
* tHOLD_ATT > tWC - (tSETUP_ATT + tWAIT)
*/
thold_att = max_t(unsigned long, hclkp, sdrt->tALH_min);
thold_att = max_t(unsigned long, thold_att, sdrt->tCH_min);
thold_att = max_t(unsigned long, thold_att, sdrt->tCLH_min);
thold_att = max_t(unsigned long, thold_att, sdrt->tCOH_min);
thold_att = max_t(unsigned long, thold_att, sdrt->tDH_min);
if ((sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC > tset_mem) &&
(thold_att < sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem))
thold_att = sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem;
if (sdrt->tADL_min > tset_mem &&
(thold_att < sdrt->tADL_min - tset_mem))
thold_att = sdrt->tADL_min - tset_mem;
if (sdrt->tWH_min > tset_mem &&
(thold_att < sdrt->tWH_min - tset_mem))
thold_att = sdrt->tWH_min - tset_mem;
if (sdrt->tWHR_min > tset_mem &&
(thold_att < sdrt->tWHR_min - tset_mem))
thold_att = sdrt->tWHR_min - tset_mem;
if ((sdrt->tRC_min > tset_att + twait) &&
(thold_att < sdrt->tRC_min - (tset_att + twait)))
thold_att = sdrt->tRC_min - (tset_att + twait);
if ((sdrt->tWC_min > tset_att + twait) &&
(thold_att < sdrt->tWC_min - (tset_att + twait)))
thold_att = sdrt->tWC_min - (tset_att + twait);
timing = DIV_ROUND_UP(thold_att, hclkp);
tims->thold_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
}
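/*
 * Worked example (illustrative only): with a 200 MHz FMC2 clock, hclkp is
 * 5000 ps. A 25 ns tWAIT constraint then becomes
 * DIV_ROUND_UP(25000, 5000) = 5 cycles, which is finally clamped to the
 * PMEM/PATT timing field range as done for every value above.
 */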
static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
const struct nand_interface_config *conf)
{
const struct nand_sdr_timings *sdrt;
sdrt = nand_get_sdr_timings(conf);
if (IS_ERR(sdrt))
return PTR_ERR(sdrt);
if (conf->timings.mode > 3)
return -EOPNOTSUPP;
if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
stm32_fmc2_nfc_calc_timings(chip, sdrt);
stm32_fmc2_nfc_timings_init(chip);
return 0;
}
static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
{
int ret = 0;
nfc->dma_tx_ch = dma_request_chan(nfc->dev, "tx");
if (IS_ERR(nfc->dma_tx_ch)) {
ret = PTR_ERR(nfc->dma_tx_ch);
if (ret != -ENODEV && ret != -EPROBE_DEFER)
dev_err(nfc->dev,
"failed to request tx DMA channel: %d\n", ret);
nfc->dma_tx_ch = NULL;
goto err_dma;
}
nfc->dma_rx_ch = dma_request_chan(nfc->dev, "rx");
if (IS_ERR(nfc->dma_rx_ch)) {
ret = PTR_ERR(nfc->dma_rx_ch);
if (ret != -ENODEV && ret != -EPROBE_DEFER)
dev_err(nfc->dev,
"failed to request rx DMA channel: %d\n", ret);
nfc->dma_rx_ch = NULL;
goto err_dma;
}
nfc->dma_ecc_ch = dma_request_chan(nfc->dev, "ecc");
if (IS_ERR(nfc->dma_ecc_ch)) {
ret = PTR_ERR(nfc->dma_ecc_ch);
if (ret != -ENODEV && ret != -EPROBE_DEFER)
dev_err(nfc->dev,
"failed to request ecc DMA channel: %d\n", ret);
nfc->dma_ecc_ch = NULL;
goto err_dma;
}
ret = sg_alloc_table(&nfc->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL);
if (ret)
return ret;
/* Allocate a buffer to store ECC status registers */
nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL);
if (!nfc->ecc_buf)
return -ENOMEM;
ret = sg_alloc_table(&nfc->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL);
if (ret)
return ret;
init_completion(&nfc->dma_data_complete);
init_completion(&nfc->dma_ecc_complete);
return 0;
err_dma:
if (ret == -ENODEV) {
dev_warn(nfc->dev,
"DMAs not defined in the DT, polling mode is used\n");
ret = 0;
}
return ret;
}
static void stm32_fmc2_nfc_nand_callbacks_setup(struct nand_chip *chip)
{
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
/*
* Specific callbacks to read/write a page depending on
* the mode (polling/sequencer) and the algo used (Hamming, BCH).
*/
if (nfc->dma_tx_ch && nfc->dma_rx_ch && nfc->dma_ecc_ch) {
/* DMA => use sequencer mode callbacks */
chip->ecc.correct = stm32_fmc2_nfc_seq_correct;
chip->ecc.write_page = stm32_fmc2_nfc_seq_write_page;
chip->ecc.read_page = stm32_fmc2_nfc_seq_read_page;
chip->ecc.write_page_raw = stm32_fmc2_nfc_seq_write_page_raw;
chip->ecc.read_page_raw = stm32_fmc2_nfc_seq_read_page_raw;
} else {
/* No DMA => use polling mode callbacks */
chip->ecc.hwctl = stm32_fmc2_nfc_hwctl;
if (chip->ecc.strength == FMC2_ECC_HAM) {
/* Hamming is used */
chip->ecc.calculate = stm32_fmc2_nfc_ham_calculate;
chip->ecc.correct = stm32_fmc2_nfc_ham_correct;
chip->ecc.options |= NAND_ECC_GENERIC_ERASED_CHECK;
} else {
/* BCH is used */
chip->ecc.calculate = stm32_fmc2_nfc_bch_calculate;
chip->ecc.correct = stm32_fmc2_nfc_bch_correct;
chip->ecc.read_page = stm32_fmc2_nfc_read_page;
}
}
/* Specific configurations depending on the algo used */
if (chip->ecc.strength == FMC2_ECC_HAM)
chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 4 : 3;
else if (chip->ecc.strength == FMC2_ECC_BCH8)
chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 14 : 13;
else
chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 8 : 7;
}
static int stm32_fmc2_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
if (section)
return -ERANGE;
oobregion->length = ecc->total;
oobregion->offset = FMC2_BBM_LEN;
return 0;
}
static int stm32_fmc2_nfc_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
if (section)
return -ERANGE;
oobregion->length = mtd->oobsize - ecc->total - FMC2_BBM_LEN;
oobregion->offset = ecc->total + FMC2_BBM_LEN;
return 0;
}
static const struct mtd_ooblayout_ops stm32_fmc2_nfc_ooblayout_ops = {
.ecc = stm32_fmc2_nfc_ooblayout_ecc,
.free = stm32_fmc2_nfc_ooblayout_free,
};
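/*
 * Illustrative layout (not a controller requirement): on a hypothetical
 * 2048 + 64 page with BCH8 and 512-byte steps, ecc->total is
 * 4 * 14 = 56 bytes, so the ECC region spans OOB bytes FMC2_BBM_LEN to
 * FMC2_BBM_LEN + 55 and the free region covers the few remaining bytes
 * after it.
 */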
static int stm32_fmc2_nfc_calc_ecc_bytes(int step_size, int strength)
{
/* Hamming */
if (strength == FMC2_ECC_HAM)
return 4;
/* BCH8 */
if (strength == FMC2_ECC_BCH8)
return 14;
/* BCH4 */
return 8;
}
NAND_ECC_CAPS_SINGLE(stm32_fmc2_nfc_ecc_caps, stm32_fmc2_nfc_calc_ecc_bytes,
FMC2_ECC_STEP_SIZE,
FMC2_ECC_HAM, FMC2_ECC_BCH4, FMC2_ECC_BCH8);
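/*
 * Illustrative sketch (not part of the upstream driver): total spare-area
 * bytes consumed by ECC for a whole page, assuming 512-byte steps. For a
 * hypothetical 2048-byte page with BCH8 this gives 4 * 14 = 56 bytes,
 * which fits in a 64-byte OOB area once the bad-block marker bytes are
 * reserved.
 */
static int __maybe_unused stm32_fmc2_nfc_example_ecc_total(int writesize,
							    int strength)
{
	int steps = writesize / FMC2_ECC_STEP_SIZE;

	return steps * stm32_fmc2_nfc_calc_ecc_bytes(FMC2_ECC_STEP_SIZE,
						     strength);
}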
static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip)
{
struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
/*
* Only NAND_ECC_ENGINE_TYPE_ON_HOST mode is actually supported
* Hamming => ecc.strength = 1
* BCH4 => ecc.strength = 4
* BCH8 => ecc.strength = 8
* ECC sector size = 512
*/
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
dev_err(nfc->dev,
"nand_ecc_engine_type is not well defined in the DT\n");
return -EINVAL;
}
/* Default ECC settings in case they are not set in the device tree */
if (!chip->ecc.size)
chip->ecc.size = FMC2_ECC_STEP_SIZE;
if (!chip->ecc.strength)
chip->ecc.strength = FMC2_ECC_BCH8;
ret = nand_ecc_choose_conf(chip, &stm32_fmc2_nfc_ecc_caps,
mtd->oobsize - FMC2_BBM_LEN);
if (ret) {
dev_err(nfc->dev, "no valid ECC settings set\n");
return ret;
}
if (mtd->writesize / chip->ecc.size > FMC2_MAX_SG) {
dev_err(nfc->dev, "nand page size is not supported\n");
return -EINVAL;
}
if (chip->bbt_options & NAND_BBT_USE_FLASH)
chip->bbt_options |= NAND_BBT_NO_OOB;
stm32_fmc2_nfc_nand_callbacks_setup(chip);
mtd_set_ooblayout(mtd, &stm32_fmc2_nfc_ooblayout_ops);
stm32_fmc2_nfc_setup(chip);
return 0;
}
static const struct nand_controller_ops stm32_fmc2_nfc_controller_ops = {
.attach_chip = stm32_fmc2_nfc_attach_chip,
.exec_op = stm32_fmc2_nfc_exec_op,
.setup_interface = stm32_fmc2_nfc_setup_interface,
};
static void stm32_fmc2_nfc_wp_enable(struct stm32_fmc2_nand *nand)
{
if (nand->wp_gpio)
gpiod_set_value(nand->wp_gpio, 1);
}
static void stm32_fmc2_nfc_wp_disable(struct stm32_fmc2_nand *nand)
{
if (nand->wp_gpio)
gpiod_set_value(nand->wp_gpio, 0);
}
static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
struct device_node *dn)
{
struct stm32_fmc2_nand *nand = &nfc->nand;
u32 cs;
int ret, i;
if (!of_get_property(dn, "reg", &nand->ncs))
return -EINVAL;
nand->ncs /= sizeof(u32);
if (!nand->ncs) {
dev_err(nfc->dev, "invalid reg property size\n");
return -EINVAL;
}
for (i = 0; i < nand->ncs; i++) {
ret = of_property_read_u32_index(dn, "reg", i, &cs);
if (ret) {
dev_err(nfc->dev, "could not retrieve reg property: %d\n",
ret);
return ret;
}
if (cs >= FMC2_MAX_CE) {
dev_err(nfc->dev, "invalid reg value: %d\n", cs);
return -EINVAL;
}
if (nfc->cs_assigned & BIT(cs)) {
dev_err(nfc->dev, "cs already assigned: %d\n", cs);
return -EINVAL;
}
nfc->cs_assigned |= BIT(cs);
nand->cs_used[i] = cs;
}
nand->wp_gpio = devm_fwnode_gpiod_get(nfc->dev, of_fwnode_handle(dn),
"wp", GPIOD_OUT_HIGH, "wp");
if (IS_ERR(nand->wp_gpio)) {
ret = PTR_ERR(nand->wp_gpio);
if (ret != -ENOENT)
return dev_err_probe(nfc->dev, ret,
"failed to request WP GPIO\n");
nand->wp_gpio = NULL;
}
nand_set_flash_node(&nand->chip, dn);
return 0;
}
static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc)
{
struct device_node *dn = nfc->dev->of_node;
struct device_node *child;
int nchips = of_get_child_count(dn);
int ret = 0;
if (!nchips) {
dev_err(nfc->dev, "NAND chip not defined\n");
return -EINVAL;
}
if (nchips > 1) {
dev_err(nfc->dev, "too many NAND chips defined\n");
return -EINVAL;
}
for_each_child_of_node(dn, child) {
ret = stm32_fmc2_nfc_parse_child(nfc, child);
if (ret < 0) {
of_node_put(child);
return ret;
}
}
return ret;
}
static int stm32_fmc2_nfc_set_cdev(struct stm32_fmc2_nfc *nfc)
{
struct device *dev = nfc->dev;
bool ebi_found = false;
if (dev->parent && of_device_is_compatible(dev->parent->of_node,
"st,stm32mp1-fmc2-ebi"))
ebi_found = true;
if (of_device_is_compatible(dev->of_node, "st,stm32mp1-fmc2-nfc")) {
if (ebi_found) {
nfc->cdev = dev->parent;
return 0;
}
return -EINVAL;
}
if (ebi_found)
return -EINVAL;
nfc->cdev = dev;
return 0;
}
static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct reset_control *rstc;
struct stm32_fmc2_nfc *nfc;
struct stm32_fmc2_nand *nand;
struct resource *res;
struct mtd_info *mtd;
struct nand_chip *chip;
struct resource cres;
int chip_cs, mem_region, ret, irq;
int start_region = 0;
nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
if (!nfc)
return -ENOMEM;
nfc->dev = dev;
nand_controller_init(&nfc->base);
nfc->base.ops = &stm32_fmc2_nfc_controller_ops;
ret = stm32_fmc2_nfc_set_cdev(nfc);
if (ret)
return ret;
ret = stm32_fmc2_nfc_parse_dt(nfc);
if (ret)
return ret;
ret = of_address_to_resource(nfc->cdev->of_node, 0, &cres);
if (ret)
return ret;
nfc->io_phys_addr = cres.start;
nfc->regmap = device_node_to_regmap(nfc->cdev->of_node);
if (IS_ERR(nfc->regmap))
return PTR_ERR(nfc->regmap);
if (nfc->dev == nfc->cdev)
start_region = 1;
for (chip_cs = 0, mem_region = start_region; chip_cs < FMC2_MAX_CE;
chip_cs++, mem_region += 3) {
if (!(nfc->cs_assigned & BIT(chip_cs)))
continue;
nfc->data_base[chip_cs] = devm_platform_get_and_ioremap_resource(pdev,
mem_region, &res);
if (IS_ERR(nfc->data_base[chip_cs]))
return PTR_ERR(nfc->data_base[chip_cs]);
nfc->data_phys_addr[chip_cs] = res->start;
nfc->cmd_base[chip_cs] = devm_platform_ioremap_resource(pdev, mem_region + 1);
if (IS_ERR(nfc->cmd_base[chip_cs]))
return PTR_ERR(nfc->cmd_base[chip_cs]);
nfc->addr_base[chip_cs] = devm_platform_ioremap_resource(pdev, mem_region + 2);
if (IS_ERR(nfc->addr_base[chip_cs]))
return PTR_ERR(nfc->addr_base[chip_cs]);
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_irq(dev, irq, stm32_fmc2_nfc_irq, 0,
dev_name(dev), nfc);
if (ret) {
dev_err(dev, "failed to request irq\n");
return ret;
}
init_completion(&nfc->complete);
nfc->clk = devm_clk_get_enabled(nfc->cdev, NULL);
if (IS_ERR(nfc->clk)) {
dev_err(dev, "can not get and enable the clock\n");
return PTR_ERR(nfc->clk);
}
rstc = devm_reset_control_get(dev, NULL);
if (IS_ERR(rstc)) {
ret = PTR_ERR(rstc);
if (ret == -EPROBE_DEFER)
return ret;
} else {
reset_control_assert(rstc);
reset_control_deassert(rstc);
}
ret = stm32_fmc2_nfc_dma_setup(nfc);
if (ret)
goto err_release_dma;
stm32_fmc2_nfc_init(nfc);
nand = &nfc->nand;
chip = &nand->chip;
mtd = nand_to_mtd(chip);
mtd->dev.parent = dev;
chip->controller = &nfc->base;
chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
NAND_USES_DMA;
stm32_fmc2_nfc_wp_disable(nand);
/* Scan to find existence of the device */
ret = nand_scan(chip, nand->ncs);
if (ret)
goto err_wp_enable;
ret = mtd_device_register(mtd, NULL, 0);
if (ret)
goto err_nand_cleanup;
platform_set_drvdata(pdev, nfc);
return 0;
err_nand_cleanup:
nand_cleanup(chip);
err_wp_enable:
stm32_fmc2_nfc_wp_enable(nand);
err_release_dma:
if (nfc->dma_ecc_ch)
dma_release_channel(nfc->dma_ecc_ch);
if (nfc->dma_tx_ch)
dma_release_channel(nfc->dma_tx_ch);
if (nfc->dma_rx_ch)
dma_release_channel(nfc->dma_rx_ch);
sg_free_table(&nfc->dma_data_sg);
sg_free_table(&nfc->dma_ecc_sg);
return ret;
}
static void stm32_fmc2_nfc_remove(struct platform_device *pdev)
{
struct stm32_fmc2_nfc *nfc = platform_get_drvdata(pdev);
struct stm32_fmc2_nand *nand = &nfc->nand;
struct nand_chip *chip = &nand->chip;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
if (nfc->dma_ecc_ch)
dma_release_channel(nfc->dma_ecc_ch);
if (nfc->dma_tx_ch)
dma_release_channel(nfc->dma_tx_ch);
if (nfc->dma_rx_ch)
dma_release_channel(nfc->dma_rx_ch);
sg_free_table(&nfc->dma_data_sg);
sg_free_table(&nfc->dma_ecc_sg);
stm32_fmc2_nfc_wp_enable(nand);
}
static int __maybe_unused stm32_fmc2_nfc_suspend(struct device *dev)
{
struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev);
struct stm32_fmc2_nand *nand = &nfc->nand;
clk_disable_unprepare(nfc->clk);
stm32_fmc2_nfc_wp_enable(nand);
pinctrl_pm_select_sleep_state(dev);
return 0;
}
static int __maybe_unused stm32_fmc2_nfc_resume(struct device *dev)
{
struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev);
struct stm32_fmc2_nand *nand = &nfc->nand;
int chip_cs, ret;
pinctrl_pm_select_default_state(dev);
ret = clk_prepare_enable(nfc->clk);
if (ret) {
dev_err(dev, "can not enable the clock\n");
return ret;
}
stm32_fmc2_nfc_init(nfc);
stm32_fmc2_nfc_wp_disable(nand);
for (chip_cs = 0; chip_cs < FMC2_MAX_CE; chip_cs++) {
if (!(nfc->cs_assigned & BIT(chip_cs)))
continue;
nand_reset(&nand->chip, chip_cs);
}
return 0;
}
static SIMPLE_DEV_PM_OPS(stm32_fmc2_nfc_pm_ops, stm32_fmc2_nfc_suspend,
stm32_fmc2_nfc_resume);
static const struct of_device_id stm32_fmc2_nfc_match[] = {
{.compatible = "st,stm32mp15-fmc2"},
{.compatible = "st,stm32mp1-fmc2-nfc"},
{}
};
MODULE_DEVICE_TABLE(of, stm32_fmc2_nfc_match);
static struct platform_driver stm32_fmc2_nfc_driver = {
.probe = stm32_fmc2_nfc_probe,
.remove_new = stm32_fmc2_nfc_remove,
.driver = {
.name = "stm32_fmc2_nfc",
.of_match_table = stm32_fmc2_nfc_match,
.pm = &stm32_fmc2_nfc_pm_ops,
},
};
module_platform_driver(stm32_fmc2_nfc_driver);
MODULE_ALIAS("platform:stm32_fmc2_nfc");
MODULE_AUTHOR("Christophe Kerello <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 NFC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mtd/nand/raw/stm32_fmc2_nand.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Overview:
* Platform independent driver for NDFC (NanD Flash Controller)
* integrated into EP440 cores
*
* Ported to an OF platform driver by Sean MacLennan
*
* The NDFC supports multiple chips, but this driver only supports a
* single chip since I do not have access to any boards with
* multiple chips.
*
* Author: Thomas Gleixner
*
* Copyright 2006 IBM
* Copyright 2008 PIKA Technologies
* Sean MacLennan <[email protected]>
*/
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/ndfc.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#define NDFC_MAX_CS 4
struct ndfc_controller {
struct platform_device *ofdev;
void __iomem *ndfcbase;
struct nand_chip chip;
int chip_select;
struct nand_controller ndfc_control;
};
static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
static void ndfc_select_chip(struct nand_chip *nchip, int chip)
{
uint32_t ccr;
struct ndfc_controller *ndfc = nand_get_controller_data(nchip);
ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
if (chip >= 0) {
ccr &= ~NDFC_CCR_BS_MASK;
ccr |= NDFC_CCR_BS(chip + ndfc->chip_select);
} else
ccr |= NDFC_CCR_RESET_CE;
out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
}
static void ndfc_hwcontrol(struct nand_chip *chip, int cmd, unsigned int ctrl)
{
struct ndfc_controller *ndfc = nand_get_controller_data(chip);
if (cmd == NAND_CMD_NONE)
return;
if (ctrl & NAND_CLE)
writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_CMD);
else
writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_ALE);
}
static int ndfc_ready(struct nand_chip *chip)
{
struct ndfc_controller *ndfc = nand_get_controller_data(chip);
return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY;
}
static void ndfc_enable_hwecc(struct nand_chip *chip, int mode)
{
uint32_t ccr;
struct ndfc_controller *ndfc = nand_get_controller_data(chip);
ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
ccr |= NDFC_CCR_RESET_ECC;
out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
wmb();
}
static int ndfc_calculate_ecc(struct nand_chip *chip,
const u_char *dat, u_char *ecc_code)
{
struct ndfc_controller *ndfc = nand_get_controller_data(chip);
uint32_t ecc;
uint8_t *p = (uint8_t *)&ecc;
wmb();
ecc = in_be32(ndfc->ndfcbase + NDFC_ECC);
/* The NDFC uses Smart Media (SMC) byte order */
ecc_code[0] = p[1];
ecc_code[1] = p[2];
ecc_code[2] = p[3];
return 0;
}
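/*
 * Illustrative example (assuming the big-endian 440 cores this driver
 * targets): an ECC register value of 0x00AABBCC is laid out in memory as
 * {0x00, 0xAA, 0xBB, 0xCC}, so the assignment above yields
 * ecc_code = {0xAA, 0xBB, 0xCC}.
 */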
/*
* Speedups for buffer read/write/verify
*
* NDFC allows 32-bit read/write of data, so we can speed up the buffer
* functions. No further checking is done, as nand_base will always
* read/write page-aligned data.
*/
static void ndfc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
struct ndfc_controller *ndfc = nand_get_controller_data(chip);
uint32_t *p = (uint32_t *) buf;
for(;len > 0; len -= 4)
*p++ = in_be32(ndfc->ndfcbase + NDFC_DATA);
}
static void ndfc_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
struct ndfc_controller *ndfc = nand_get_controller_data(chip);
uint32_t *p = (uint32_t *) buf;
for(;len > 0; len -= 4)
out_be32(ndfc->ndfcbase + NDFC_DATA, *p++);
}
/*
* Initialize chip structure
*/
static int ndfc_chip_init(struct ndfc_controller *ndfc,
struct device_node *node)
{
struct device_node *flash_np;
struct nand_chip *chip = &ndfc->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
chip->legacy.IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
chip->legacy.IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA;
chip->legacy.cmd_ctrl = ndfc_hwcontrol;
chip->legacy.dev_ready = ndfc_ready;
chip->legacy.select_chip = ndfc_select_chip;
chip->legacy.chip_delay = 50;
chip->controller = &ndfc->ndfc_control;
chip->legacy.read_buf = ndfc_read_buf;
chip->legacy.write_buf = ndfc_write_buf;
chip->ecc.correct = rawnand_sw_hamming_correct;
chip->ecc.hwctl = ndfc_enable_hwecc;
chip->ecc.calculate = ndfc_calculate_ecc;
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.size = 256;
chip->ecc.bytes = 3;
chip->ecc.strength = 1;
nand_set_controller_data(chip, ndfc);
mtd->dev.parent = &ndfc->ofdev->dev;
flash_np = of_get_next_child(node, NULL);
if (!flash_np)
return -ENODEV;
nand_set_flash_node(chip, flash_np);
mtd->name = kasprintf(GFP_KERNEL, "%s.%pOFn", dev_name(&ndfc->ofdev->dev),
flash_np);
if (!mtd->name) {
ret = -ENOMEM;
goto err;
}
ret = nand_scan(chip, 1);
if (ret)
goto err;
ret = mtd_device_register(mtd, NULL, 0);
err:
of_node_put(flash_np);
if (ret)
kfree(mtd->name);
return ret;
}
static int ndfc_probe(struct platform_device *ofdev)
{
struct ndfc_controller *ndfc;
const __be32 *reg;
u32 ccr;
u32 cs;
int err, len;
/* Read the reg property to get the chip select */
reg = of_get_property(ofdev->dev.of_node, "reg", &len);
if (reg == NULL || len != 12) {
dev_err(&ofdev->dev, "unable read reg property (%d)\n", len);
return -ENOENT;
}
cs = be32_to_cpu(reg[0]);
if (cs >= NDFC_MAX_CS) {
dev_err(&ofdev->dev, "invalid CS number (%d)\n", cs);
return -EINVAL;
}
ndfc = &ndfc_ctrl[cs];
ndfc->chip_select = cs;
nand_controller_init(&ndfc->ndfc_control);
ndfc->ofdev = ofdev;
dev_set_drvdata(&ofdev->dev, ndfc);
ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0);
if (!ndfc->ndfcbase) {
dev_err(&ofdev->dev, "failed to get memory\n");
return -EIO;
}
ccr = NDFC_CCR_BS(ndfc->chip_select);
/* It is ok if ccr does not exist - just default to 0 */
reg = of_get_property(ofdev->dev.of_node, "ccr", NULL);
if (reg)
ccr |= be32_to_cpup(reg);
out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
/* Set the bank settings if given */
reg = of_get_property(ofdev->dev.of_node, "bank-settings", NULL);
if (reg) {
int offset = NDFC_BCFG0 + (ndfc->chip_select << 2);
out_be32(ndfc->ndfcbase + offset, be32_to_cpup(reg));
}
err = ndfc_chip_init(ndfc, ofdev->dev.of_node);
if (err) {
iounmap(ndfc->ndfcbase);
return err;
}
return 0;
}
static void ndfc_remove(struct platform_device *ofdev)
{
struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
struct nand_chip *chip = &ndfc->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
ret = mtd_device_unregister(mtd);
WARN_ON(ret);
nand_cleanup(chip);
kfree(mtd->name);
}
static const struct of_device_id ndfc_match[] = {
{ .compatible = "ibm,ndfc", },
{}
};
MODULE_DEVICE_TABLE(of, ndfc_match);
static struct platform_driver ndfc_driver = {
.driver = {
.name = "ndfc",
.of_match_table = ndfc_match,
},
.probe = ndfc_probe,
.remove_new = ndfc_remove,
};
module_platform_driver(ndfc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Gleixner <[email protected]>");
MODULE_DESCRIPTION("OF Platform driver for NDFC");
| linux-master | drivers/mtd/nand/raw/ndfc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Evatronix/Renesas R-Car Gen3, RZ/N1D, RZ/N1S, RZ/N1L NAND controller driver
*
* Copyright (C) 2021 Schneider Electric
* Author: Miquel RAYNAL <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#define COMMAND_REG 0x00
#define COMMAND_SEQ(x) FIELD_PREP(GENMASK(5, 0), (x))
#define COMMAND_SEQ_10 COMMAND_SEQ(0x2A)
#define COMMAND_SEQ_12 COMMAND_SEQ(0x0C)
#define COMMAND_SEQ_18 COMMAND_SEQ(0x32)
#define COMMAND_SEQ_19 COMMAND_SEQ(0x13)
#define COMMAND_SEQ_GEN_IN COMMAND_SEQ_18
#define COMMAND_SEQ_GEN_OUT COMMAND_SEQ_19
#define COMMAND_SEQ_READ_PAGE COMMAND_SEQ_10
#define COMMAND_SEQ_WRITE_PAGE COMMAND_SEQ_12
#define COMMAND_INPUT_SEL_AHBS 0
#define COMMAND_INPUT_SEL_DMA BIT(6)
#define COMMAND_FIFO_SEL 0
#define COMMAND_DATA_SEL BIT(7)
#define COMMAND_0(x) FIELD_PREP(GENMASK(15, 8), (x))
#define COMMAND_1(x) FIELD_PREP(GENMASK(23, 16), (x))
#define COMMAND_2(x) FIELD_PREP(GENMASK(31, 24), (x))
#define CONTROL_REG 0x04
#define CONTROL_CHECK_RB_LINE 0
#define CONTROL_ECC_BLOCK_SIZE(x) FIELD_PREP(GENMASK(2, 1), (x))
#define CONTROL_ECC_BLOCK_SIZE_256 CONTROL_ECC_BLOCK_SIZE(0)
#define CONTROL_ECC_BLOCK_SIZE_512 CONTROL_ECC_BLOCK_SIZE(1)
#define CONTROL_ECC_BLOCK_SIZE_1024 CONTROL_ECC_BLOCK_SIZE(2)
#define CONTROL_INT_EN BIT(4)
#define CONTROL_ECC_EN BIT(5)
#define CONTROL_BLOCK_SIZE(x) FIELD_PREP(GENMASK(7, 6), (x))
#define CONTROL_BLOCK_SIZE_32P CONTROL_BLOCK_SIZE(0)
#define CONTROL_BLOCK_SIZE_64P CONTROL_BLOCK_SIZE(1)
#define CONTROL_BLOCK_SIZE_128P CONTROL_BLOCK_SIZE(2)
#define CONTROL_BLOCK_SIZE_256P CONTROL_BLOCK_SIZE(3)
#define STATUS_REG 0x8
#define MEM_RDY(cs, reg) (FIELD_GET(GENMASK(3, 0), (reg)) & BIT(cs))
#define CTRL_RDY(reg) (FIELD_GET(BIT(8), (reg)) == 0)
#define ECC_CTRL_REG 0x18
#define ECC_CTRL_CAP(x) FIELD_PREP(GENMASK(2, 0), (x))
#define ECC_CTRL_CAP_2B ECC_CTRL_CAP(0)
#define ECC_CTRL_CAP_4B ECC_CTRL_CAP(1)
#define ECC_CTRL_CAP_8B ECC_CTRL_CAP(2)
#define ECC_CTRL_CAP_16B ECC_CTRL_CAP(3)
#define ECC_CTRL_CAP_24B ECC_CTRL_CAP(4)
#define ECC_CTRL_CAP_32B ECC_CTRL_CAP(5)
#define ECC_CTRL_ERR_THRESHOLD(x) FIELD_PREP(GENMASK(13, 8), (x))
#define INT_MASK_REG 0x10
#define INT_STATUS_REG 0x14
#define INT_CMD_END BIT(1)
#define INT_DMA_END BIT(3)
#define INT_MEM_RDY(cs) FIELD_PREP(GENMASK(11, 8), BIT(cs))
#define INT_DMA_ENDED BIT(3)
#define MEM_IS_RDY(cs, reg) (FIELD_GET(GENMASK(11, 8), (reg)) & BIT(cs))
#define DMA_HAS_ENDED(reg) FIELD_GET(BIT(3), (reg))
#define ECC_OFFSET_REG 0x1C
#define ECC_OFFSET(x) FIELD_PREP(GENMASK(15, 0), (x))
#define ECC_STAT_REG 0x20
#define ECC_STAT_CORRECTABLE(cs, reg) (FIELD_GET(GENMASK(3, 0), (reg)) & BIT(cs))
#define ECC_STAT_UNCORRECTABLE(cs, reg) (FIELD_GET(GENMASK(11, 8), (reg)) & BIT(cs))
#define ADDR0_COL_REG 0x24
#define ADDR0_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
#define ADDR0_ROW_REG 0x28
#define ADDR0_ROW(x) FIELD_PREP(GENMASK(23, 0), (x))
#define ADDR1_COL_REG 0x2C
#define ADDR1_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
#define ADDR1_ROW_REG 0x30
#define ADDR1_ROW(x) FIELD_PREP(GENMASK(23, 0), (x))
#define FIFO_DATA_REG 0x38
#define DATA_REG 0x3C
#define DATA_REG_SIZE_REG 0x40
#define DMA_ADDR_LOW_REG 0x64
#define DMA_ADDR_HIGH_REG 0x68
#define DMA_CNT_REG 0x6C
#define DMA_CTRL_REG 0x70
#define DMA_CTRL_INCREMENT_BURST_4 0
#define DMA_CTRL_REGISTER_MANAGED_MODE 0
#define DMA_CTRL_START BIT(7)
#define MEM_CTRL_REG 0x80
#define MEM_CTRL_CS(cs) FIELD_PREP(GENMASK(1, 0), (cs))
#define MEM_CTRL_DIS_WP(cs) FIELD_PREP(GENMASK(11, 8), BIT((cs)))
#define DATA_SIZE_REG 0x84
#define DATA_SIZE(x) FIELD_PREP(GENMASK(14, 0), (x))
#define TIMINGS_ASYN_REG 0x88
#define TIMINGS_ASYN_TRWP(x) FIELD_PREP(GENMASK(3, 0), max((x), 1U) - 1)
#define TIMINGS_ASYN_TRWH(x) FIELD_PREP(GENMASK(7, 4), max((x), 1U) - 1)
#define TIM_SEQ0_REG 0x90
#define TIM_SEQ0_TCCS(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_SEQ0_TADL(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_SEQ0_TRHW(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_SEQ0_TWHR(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
#define TIM_SEQ1_REG 0x94
#define TIM_SEQ1_TWB(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_SEQ1_TRR(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_SEQ1_TWW(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_GEN_SEQ0_REG 0x98
#define TIM_GEN_SEQ0_D0(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_GEN_SEQ0_D1(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_GEN_SEQ0_D2(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_GEN_SEQ0_D3(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
#define TIM_GEN_SEQ1_REG 0x9c
#define TIM_GEN_SEQ1_D4(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_GEN_SEQ1_D5(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_GEN_SEQ1_D6(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_GEN_SEQ1_D7(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
#define TIM_GEN_SEQ2_REG 0xA0
#define TIM_GEN_SEQ2_D8(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_GEN_SEQ2_D9(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_GEN_SEQ2_D10(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_GEN_SEQ2_D11(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)
#define FIFO_INIT_REG 0xB4
#define FIFO_INIT BIT(0)
#define FIFO_STATE_REG 0xB4
#define FIFO_STATE_R_EMPTY(reg) FIELD_GET(BIT(0), (reg))
#define FIFO_STATE_W_FULL(reg) FIELD_GET(BIT(1), (reg))
#define FIFO_STATE_C_EMPTY(reg) FIELD_GET(BIT(2), (reg))
#define FIFO_STATE_R_FULL(reg) FIELD_GET(BIT(6), (reg))
#define FIFO_STATE_W_EMPTY(reg) FIELD_GET(BIT(7), (reg))
#define GEN_SEQ_CTRL_REG 0xB8
#define GEN_SEQ_CMD0_EN BIT(0)
#define GEN_SEQ_CMD1_EN BIT(1)
#define GEN_SEQ_CMD2_EN BIT(2)
#define GEN_SEQ_CMD3_EN BIT(3)
#define GEN_SEQ_COL_A0(x) FIELD_PREP(GENMASK(5, 4), min((x), 2U))
#define GEN_SEQ_COL_A1(x) FIELD_PREP(GENMASK(7, 6), min((x), 2U))
#define GEN_SEQ_ROW_A0(x) FIELD_PREP(GENMASK(9, 8), min((x), 3U))
#define GEN_SEQ_ROW_A1(x) FIELD_PREP(GENMASK(11, 10), min((x), 3U))
#define GEN_SEQ_DATA_EN BIT(12)
#define GEN_SEQ_DELAY_EN(x) FIELD_PREP(GENMASK(14, 13), (x))
#define GEN_SEQ_DELAY0_EN GEN_SEQ_DELAY_EN(1)
#define GEN_SEQ_DELAY1_EN GEN_SEQ_DELAY_EN(2)
#define GEN_SEQ_IMD_SEQ BIT(15)
#define GEN_SEQ_COMMAND_3(x) FIELD_PREP(GENMASK(26, 16), (x))
#define DMA_TLVL_REG 0x114
#define DMA_TLVL(x) FIELD_PREP(GENMASK(7, 0), (x))
#define DMA_TLVL_MAX DMA_TLVL(0xFF)
#define TIM_GEN_SEQ3_REG 0x134
#define TIM_GEN_SEQ3_D12(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define ECC_CNT_REG 0x14C
#define ECC_CNT(cs, reg) FIELD_GET(GENMASK(5, 0), (reg) >> ((cs) * 8))
#define RNANDC_CS_NUM 4
#define TO_CYCLES64(ps, period_ns) ((unsigned int)DIV_ROUND_UP_ULL(div_u64(ps, 1000), \
period_ns))
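/*
 * Usage example (illustrative only): TO_CYCLES64(20000, 10) converts a
 * 20 ns (20000 ps) timing requirement into DIV_ROUND_UP(20, 10) = 2
 * cycles of a 10 ns clock period.
 */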
struct rnand_chip_sel {
unsigned int cs;
};
struct rnand_chip {
struct nand_chip chip;
struct list_head node;
int selected_die;
u32 ctrl;
unsigned int nsels;
u32 control;
u32 ecc_ctrl;
u32 timings_asyn;
u32 tim_seq0;
u32 tim_seq1;
u32 tim_gen_seq0;
u32 tim_gen_seq1;
u32 tim_gen_seq2;
u32 tim_gen_seq3;
struct rnand_chip_sel sels[];
};
struct rnandc {
struct nand_controller controller;
struct device *dev;
void __iomem *regs;
unsigned long ext_clk_rate;
unsigned long assigned_cs;
struct list_head chips;
struct nand_chip *selected_chip;
struct completion complete;
bool use_polling;
u8 *buf;
unsigned int buf_sz;
};
struct rnandc_op {
u32 command;
u32 addr0_col;
u32 addr0_row;
u32 addr1_col;
u32 addr1_row;
u32 data_size;
u32 ecc_offset;
u32 gen_seq_ctrl;
u8 *buf;
bool read;
unsigned int len;
};
static inline struct rnandc *to_rnandc(struct nand_controller *ctrl)
{
return container_of(ctrl, struct rnandc, controller);
}
static inline struct rnand_chip *to_rnand(struct nand_chip *chip)
{
return container_of(chip, struct rnand_chip, chip);
}
static inline unsigned int to_rnandc_cs(struct rnand_chip *nand)
{
return nand->sels[nand->selected_die].cs;
}
static void rnandc_dis_correction(struct rnandc *rnandc)
{
u32 control;
control = readl_relaxed(rnandc->regs + CONTROL_REG);
control &= ~CONTROL_ECC_EN;
writel_relaxed(control, rnandc->regs + CONTROL_REG);
}
static void rnandc_en_correction(struct rnandc *rnandc)
{
u32 control;
control = readl_relaxed(rnandc->regs + CONTROL_REG);
control |= CONTROL_ECC_EN;
writel_relaxed(control, rnandc->regs + CONTROL_REG);
}
static void rnandc_clear_status(struct rnandc *rnandc)
{
writel_relaxed(0, rnandc->regs + INT_STATUS_REG);
writel_relaxed(0, rnandc->regs + ECC_STAT_REG);
writel_relaxed(0, rnandc->regs + ECC_CNT_REG);
}
static void rnandc_dis_interrupts(struct rnandc *rnandc)
{
writel_relaxed(0, rnandc->regs + INT_MASK_REG);
}
static void rnandc_en_interrupts(struct rnandc *rnandc, u32 val)
{
if (!rnandc->use_polling)
writel_relaxed(val, rnandc->regs + INT_MASK_REG);
}
static void rnandc_clear_fifo(struct rnandc *rnandc)
{
writel_relaxed(FIFO_INIT, rnandc->regs + FIFO_INIT_REG);
}
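/*
* Selecting a target rewrites every cached configuration and timing register,
* so the reprogramming is skipped when the requested chip/die is already the
* active one.
*/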
static void rnandc_select_target(struct nand_chip *chip, int die_nr)
{
struct rnand_chip *rnand = to_rnand(chip);
struct rnandc *rnandc = to_rnandc(chip->controller);
unsigned int cs = rnand->sels[die_nr].cs;
if (chip == rnandc->selected_chip && die_nr == rnand->selected_die)
return;
rnandc_clear_status(rnandc);
writel_relaxed(MEM_CTRL_CS(cs) | MEM_CTRL_DIS_WP(cs), rnandc->regs + MEM_CTRL_REG);
writel_relaxed(rnand->control, rnandc->regs + CONTROL_REG);
writel_relaxed(rnand->ecc_ctrl, rnandc->regs + ECC_CTRL_REG);
writel_relaxed(rnand->timings_asyn, rnandc->regs + TIMINGS_ASYN_REG);
writel_relaxed(rnand->tim_seq0, rnandc->regs + TIM_SEQ0_REG);
writel_relaxed(rnand->tim_seq1, rnandc->regs + TIM_SEQ1_REG);
writel_relaxed(rnand->tim_gen_seq0, rnandc->regs + TIM_GEN_SEQ0_REG);
writel_relaxed(rnand->tim_gen_seq1, rnandc->regs + TIM_GEN_SEQ1_REG);
writel_relaxed(rnand->tim_gen_seq2, rnandc->regs + TIM_GEN_SEQ2_REG);
writel_relaxed(rnand->tim_gen_seq3, rnandc->regs + TIM_GEN_SEQ3_REG);
rnandc->selected_chip = chip;
rnand->selected_die = die_nr;
}
static void rnandc_trigger_op(struct rnandc *rnandc, struct rnandc_op *rop)
{
writel_relaxed(rop->addr0_col, rnandc->regs + ADDR0_COL_REG);
writel_relaxed(rop->addr0_row, rnandc->regs + ADDR0_ROW_REG);
writel_relaxed(rop->addr1_col, rnandc->regs + ADDR1_COL_REG);
writel_relaxed(rop->addr1_row, rnandc->regs + ADDR1_ROW_REG);
writel_relaxed(rop->ecc_offset, rnandc->regs + ECC_OFFSET_REG);
writel_relaxed(rop->gen_seq_ctrl, rnandc->regs + GEN_SEQ_CTRL_REG);
writel_relaxed(DATA_SIZE(rop->len), rnandc->regs + DATA_SIZE_REG);
writel_relaxed(rop->command, rnandc->regs + COMMAND_REG);
}
static void rnandc_trigger_dma(struct rnandc *rnandc)
{
writel_relaxed(DMA_CTRL_INCREMENT_BURST_4 |
DMA_CTRL_REGISTER_MANAGED_MODE |
DMA_CTRL_START, rnandc->regs + DMA_CTRL_REG);
}
static irqreturn_t rnandc_irq_handler(int irq, void *private)
{
struct rnandc *rnandc = private;
rnandc_dis_interrupts(rnandc);
complete(&rnandc->complete);
return IRQ_HANDLED;
}
static int rnandc_wait_end_of_op(struct rnandc *rnandc,
struct nand_chip *chip)
{
struct rnand_chip *rnand = to_rnand(chip);
unsigned int cs = to_rnandc_cs(rnand);
u32 status;
int ret;
ret = readl_poll_timeout(rnandc->regs + STATUS_REG, status,
MEM_RDY(cs, status) && CTRL_RDY(status),
1, 100000);
if (ret)
dev_err(rnandc->dev, "Operation timed out, status: 0x%08x\n",
status);
return ret;
}
static int rnandc_wait_end_of_io(struct rnandc *rnandc,
struct nand_chip *chip)
{
int timeout_ms = 1000;
int ret;
if (rnandc->use_polling) {
struct rnand_chip *rnand = to_rnand(chip);
unsigned int cs = to_rnandc_cs(rnand);
u32 status;
ret = readl_poll_timeout(rnandc->regs + INT_STATUS_REG, status,
MEM_IS_RDY(cs, status) &
DMA_HAS_ENDED(status),
0, timeout_ms * 1000);
} else {
ret = wait_for_completion_timeout(&rnandc->complete,
msecs_to_jiffies(timeout_ms));
if (!ret)
ret = -ETIMEDOUT;
else
ret = 0;
}
return ret;
}
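/*
* Full page read with on-host ECC: the data area is transferred by DMA into
* the controller bounce buffer while the ECC engine checks every chunk. The
* OOB area is only fetched when the caller requested it or when an
* uncorrectable error forces a re-check against the erased-page pattern.
*/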
static int rnandc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct rnandc *rnandc = to_rnandc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
struct rnand_chip *rnand = to_rnand(chip);
unsigned int cs = to_rnandc_cs(rnand);
struct rnandc_op rop = {
.command = COMMAND_INPUT_SEL_DMA | COMMAND_0(NAND_CMD_READ0) |
COMMAND_2(NAND_CMD_READSTART) | COMMAND_FIFO_SEL |
COMMAND_SEQ_READ_PAGE,
.addr0_row = page,
.len = mtd->writesize,
.ecc_offset = ECC_OFFSET(mtd->writesize + 2),
};
unsigned int max_bitflips = 0;
dma_addr_t dma_addr;
u32 ecc_stat;
int bf, ret, i;
/* Prepare controller */
rnandc_select_target(chip, chip->cur_cs);
rnandc_clear_status(rnandc);
reinit_completion(&rnandc->complete);
rnandc_en_interrupts(rnandc, INT_DMA_ENDED);
rnandc_en_correction(rnandc);
/* Configure DMA */
dma_addr = dma_map_single(rnandc->dev, rnandc->buf, mtd->writesize,
DMA_FROM_DEVICE);
writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
rnandc_trigger_op(rnandc, &rop);
rnandc_trigger_dma(rnandc);
ret = rnandc_wait_end_of_io(rnandc, chip);
dma_unmap_single(rnandc->dev, dma_addr, mtd->writesize, DMA_FROM_DEVICE);
rnandc_dis_correction(rnandc);
if (ret) {
dev_err(rnandc->dev, "Read page operation never ending\n");
return ret;
}
ecc_stat = readl_relaxed(rnandc->regs + ECC_STAT_REG);
if (oob_required || ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
ret = nand_change_read_column_op(chip, mtd->writesize,
chip->oob_poi, mtd->oobsize,
false);
if (ret)
return ret;
}
if (ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
for (i = 0; i < chip->ecc.steps; i++) {
unsigned int off = i * chip->ecc.size;
unsigned int eccoff = i * chip->ecc.bytes;
bf = nand_check_erased_ecc_chunk(rnandc->buf + off,
chip->ecc.size,
chip->oob_poi + 2 + eccoff,
chip->ecc.bytes,
NULL, 0,
chip->ecc.strength);
if (bf < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += bf;
max_bitflips = max_t(unsigned int, max_bitflips, bf);
}
}
} else if (ECC_STAT_CORRECTABLE(cs, ecc_stat)) {
bf = ECC_CNT(cs, readl_relaxed(rnandc->regs + ECC_CNT_REG));
/*
* The number of bitflips is an approximation given the fact
* that this controller does not provide per-chunk details but
* only gives statistics on the entire page.
*/
mtd->ecc_stats.corrected += bf;
}
memcpy(buf, rnandc->buf, mtd->writesize);
return 0;
}
static int rnandc_read_subpage_hw_ecc(struct nand_chip *chip, u32 req_offset,
u32 req_len, u8 *bufpoi, int page)
{
struct rnandc *rnandc = to_rnandc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
struct rnand_chip *rnand = to_rnand(chip);
unsigned int cs = to_rnandc_cs(rnand);
unsigned int page_off = round_down(req_offset, chip->ecc.size);
unsigned int real_len = round_up(req_offset + req_len - page_off,
chip->ecc.size);
unsigned int start_chunk = page_off / chip->ecc.size;
unsigned int nchunks = real_len / chip->ecc.size;
unsigned int ecc_off = 2 + (start_chunk * chip->ecc.bytes);
struct rnandc_op rop = {
.command = COMMAND_INPUT_SEL_AHBS | COMMAND_0(NAND_CMD_READ0) |
COMMAND_2(NAND_CMD_READSTART) | COMMAND_FIFO_SEL |
COMMAND_SEQ_READ_PAGE,
.addr0_row = page,
.addr0_col = page_off,
.len = real_len,
.ecc_offset = ECC_OFFSET(mtd->writesize + ecc_off),
};
unsigned int max_bitflips = 0, i;
u32 ecc_stat;
int bf, ret;
/* Prepare controller */
rnandc_select_target(chip, chip->cur_cs);
rnandc_clear_status(rnandc);
rnandc_en_correction(rnandc);
rnandc_trigger_op(rnandc, &rop);
while (!FIFO_STATE_C_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
cpu_relax();
while (FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
cpu_relax();
ioread32_rep(rnandc->regs + FIFO_DATA_REG, bufpoi + page_off,
real_len / 4);
if (!FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG))) {
dev_err(rnandc->dev, "Clearing residual data in the read FIFO\n");
rnandc_clear_fifo(rnandc);
}
ret = rnandc_wait_end_of_op(rnandc, chip);
rnandc_dis_correction(rnandc);
if (ret) {
dev_err(rnandc->dev, "Read subpage operation never ending\n");
return ret;
}
ecc_stat = readl_relaxed(rnandc->regs + ECC_STAT_REG);
if (ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
ret = nand_change_read_column_op(chip, mtd->writesize,
chip->oob_poi, mtd->oobsize,
false);
if (ret)
return ret;
for (i = start_chunk; i < start_chunk + nchunks; i++) {
unsigned int dataoff = i * chip->ecc.size;
unsigned int eccoff = 2 + (i * chip->ecc.bytes);
bf = nand_check_erased_ecc_chunk(bufpoi + dataoff,
chip->ecc.size,
chip->oob_poi + eccoff,
chip->ecc.bytes,
NULL, 0,
chip->ecc.strength);
if (bf < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += bf;
max_bitflips = max_t(unsigned int, max_bitflips, bf);
}
}
} else if (ECC_STAT_CORRECTABLE(cs, ecc_stat)) {
bf = ECC_CNT(cs, readl_relaxed(rnandc->regs + ECC_CNT_REG));
/*
* The number of bitflips is an approximation given the fact
* that this controller does not provide per-chunk details but
* only gives statistics on the entire page.
*/
mtd->ecc_stats.corrected += bf;
}
return 0;
}
static int rnandc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
struct rnandc *rnandc = to_rnandc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
struct rnand_chip *rnand = to_rnand(chip);
unsigned int cs = to_rnandc_cs(rnand);
struct rnandc_op rop = {
.command = COMMAND_INPUT_SEL_DMA | COMMAND_0(NAND_CMD_SEQIN) |
COMMAND_1(NAND_CMD_PAGEPROG) | COMMAND_FIFO_SEL |
COMMAND_SEQ_WRITE_PAGE,
.addr0_row = page,
.len = mtd->writesize,
.ecc_offset = ECC_OFFSET(mtd->writesize + 2),
};
dma_addr_t dma_addr;
int ret;
memcpy(rnandc->buf, buf, mtd->writesize);
/* Prepare controller */
rnandc_select_target(chip, chip->cur_cs);
rnandc_clear_status(rnandc);
reinit_completion(&rnandc->complete);
rnandc_en_interrupts(rnandc, INT_MEM_RDY(cs));
rnandc_en_correction(rnandc);
/* Configure DMA */
dma_addr = dma_map_single(rnandc->dev, (void *)rnandc->buf, mtd->writesize,
DMA_TO_DEVICE);
writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);
rnandc_trigger_op(rnandc, &rop);
rnandc_trigger_dma(rnandc);
ret = rnandc_wait_end_of_io(rnandc, chip);
dma_unmap_single(rnandc->dev, dma_addr, mtd->writesize, DMA_TO_DEVICE);
rnandc_dis_correction(rnandc);
if (ret) {
dev_err(rnandc->dev, "Write page operation never ending\n");
return ret;
}
if (!oob_required)
return 0;
return nand_change_write_column_op(chip, mtd->writesize, chip->oob_poi,
mtd->oobsize, false);
}
static int rnandc_write_subpage_hw_ecc(struct nand_chip *chip, u32 req_offset,
u32 req_len, const u8 *bufpoi,
int oob_required, int page)
{
struct rnandc *rnandc = to_rnandc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int page_off = round_down(req_offset, chip->ecc.size);
unsigned int real_len = round_up(req_offset + req_len - page_off,
chip->ecc.size);
unsigned int start_chunk = page_off / chip->ecc.size;
unsigned int ecc_off = 2 + (start_chunk * chip->ecc.bytes);
struct rnandc_op rop = {
.command = COMMAND_INPUT_SEL_AHBS | COMMAND_0(NAND_CMD_SEQIN) |
COMMAND_1(NAND_CMD_PAGEPROG) | COMMAND_FIFO_SEL |
COMMAND_SEQ_WRITE_PAGE,
.addr0_row = page,
.addr0_col = page_off,
.len = real_len,
.ecc_offset = ECC_OFFSET(mtd->writesize + ecc_off),
};
int ret;
/* Prepare controller */
rnandc_select_target(chip, chip->cur_cs);
rnandc_clear_status(rnandc);
rnandc_en_correction(rnandc);
rnandc_trigger_op(rnandc, &rop);
while (FIFO_STATE_W_FULL(readl(rnandc->regs + FIFO_STATE_REG)))
cpu_relax();
iowrite32_rep(rnandc->regs + FIFO_DATA_REG, bufpoi + page_off,
real_len / 4);
while (!FIFO_STATE_W_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
cpu_relax();
ret = rnandc_wait_end_of_op(rnandc, chip);
rnandc_dis_correction(rnandc);
if (ret) {
dev_err(rnandc->dev, "Write subpage operation never ending\n");
return ret;
}
return 0;
}
/*
* This controller is simple enough that it does not need the parser
* provided by the core; instead, every situation is handled here.
*/
static int rnandc_exec_op(struct nand_chip *chip,
const struct nand_operation *op, bool check_only)
{
struct rnandc *rnandc = to_rnandc(chip->controller);
const struct nand_op_instr *instr = NULL;
struct rnandc_op rop = {
.command = COMMAND_INPUT_SEL_AHBS,
.gen_seq_ctrl = GEN_SEQ_IMD_SEQ,
};
unsigned int cmd_phase = 0, addr_phase = 0, data_phase = 0,
delay_phase = 0, delays = 0;
unsigned int op_id, col_addrs, row_addrs, naddrs, remainder, words, i;
const u8 *addrs;
u32 last_bytes;
int ret;
if (!check_only)
rnandc_select_target(chip, op->cs);
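/*
* Map each instruction onto a slot of the controller's fixed generic
* sequence (roughly CMD0, ADDR0, CMD3, ADDR1, CMD2, DELAY0, DATA, CMD1,
* DELAY1). The *_phase counters record how many slots of each kind have
* been consumed; parsing an instruction of one type also advances the
* counters of the other types past the slots that can no longer be used,
* so later instructions land where the hardware expects them.
*/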
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];
nand_op_trace(" ", instr);
switch (instr->type) {
case NAND_OP_CMD_INSTR:
switch (cmd_phase++) {
case 0:
rop.command |= COMMAND_0(instr->ctx.cmd.opcode);
rop.gen_seq_ctrl |= GEN_SEQ_CMD0_EN;
break;
case 1:
rop.gen_seq_ctrl |= GEN_SEQ_COMMAND_3(instr->ctx.cmd.opcode);
rop.gen_seq_ctrl |= GEN_SEQ_CMD3_EN;
if (addr_phase == 0)
addr_phase = 1;
break;
case 2:
rop.command |= COMMAND_2(instr->ctx.cmd.opcode);
rop.gen_seq_ctrl |= GEN_SEQ_CMD2_EN;
if (addr_phase <= 1)
addr_phase = 2;
break;
case 3:
rop.command |= COMMAND_1(instr->ctx.cmd.opcode);
rop.gen_seq_ctrl |= GEN_SEQ_CMD1_EN;
if (addr_phase <= 1)
addr_phase = 2;
if (delay_phase == 0)
delay_phase = 1;
if (data_phase == 0)
data_phase = 1;
break;
default:
return -EOPNOTSUPP;
}
break;
case NAND_OP_ADDR_INSTR:
addrs = instr->ctx.addr.addrs;
naddrs = instr->ctx.addr.naddrs;
if (naddrs > 5)
return -EOPNOTSUPP;
col_addrs = min(2U, naddrs);
row_addrs = naddrs > 2 ? naddrs - col_addrs : 0;
switch (addr_phase++) {
case 0:
for (i = 0; i < col_addrs; i++)
rop.addr0_col |= addrs[i] << (i * 8);
rop.gen_seq_ctrl |= GEN_SEQ_COL_A0(col_addrs);
for (i = 0; i < row_addrs; i++)
rop.addr0_row |= addrs[2 + i] << (i * 8);
rop.gen_seq_ctrl |= GEN_SEQ_ROW_A0(row_addrs);
if (cmd_phase == 0)
cmd_phase = 1;
break;
case 1:
for (i = 0; i < col_addrs; i++)
rop.addr1_col |= addrs[i] << (i * 8);
rop.gen_seq_ctrl |= GEN_SEQ_COL_A1(col_addrs);
for (i = 0; i < row_addrs; i++)
rop.addr1_row |= addrs[2 + i] << (i * 8);
rop.gen_seq_ctrl |= GEN_SEQ_ROW_A1(row_addrs);
if (cmd_phase <= 1)
cmd_phase = 2;
break;
default:
return -EOPNOTSUPP;
}
break;
case NAND_OP_DATA_IN_INSTR:
rop.read = true;
fallthrough;
case NAND_OP_DATA_OUT_INSTR:
rop.gen_seq_ctrl |= GEN_SEQ_DATA_EN;
rop.buf = instr->ctx.data.buf.in;
rop.len = instr->ctx.data.len;
rop.command |= COMMAND_FIFO_SEL;
switch (data_phase++) {
case 0:
if (cmd_phase <= 2)
cmd_phase = 3;
if (addr_phase <= 1)
addr_phase = 2;
if (delay_phase == 0)
delay_phase = 1;
break;
default:
return -EOPNOTSUPP;
}
break;
case NAND_OP_WAITRDY_INSTR:
switch (delay_phase++) {
case 0:
rop.gen_seq_ctrl |= GEN_SEQ_DELAY0_EN;
if (cmd_phase <= 2)
cmd_phase = 3;
break;
case 1:
rop.gen_seq_ctrl |= GEN_SEQ_DELAY1_EN;
if (cmd_phase <= 3)
cmd_phase = 4;
if (data_phase == 0)
data_phase = 1;
break;
default:
return -EOPNOTSUPP;
}
break;
}
}
/*
* Sequence 19 is generic and dedicated to write operations.
* Sequence 18 is also generic and works for all other operations.
*/
if (rop.buf && !rop.read)
rop.command |= COMMAND_SEQ_GEN_OUT;
else
rop.command |= COMMAND_SEQ_GEN_IN;
if (delays > 1) {
dev_err(rnandc->dev, "Cannot handle more than one wait delay\n");
return -EOPNOTSUPP;
}
if (check_only)
return 0;
rnandc_trigger_op(rnandc, &rop);
words = rop.len / sizeof(u32);
remainder = rop.len % sizeof(u32);
if (rop.buf && rop.read) {
while (!FIFO_STATE_C_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
cpu_relax();
while (FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
cpu_relax();
ioread32_rep(rnandc->regs + FIFO_DATA_REG, rop.buf, words);
if (remainder) {
last_bytes = readl_relaxed(rnandc->regs + FIFO_DATA_REG);
memcpy(rop.buf + (words * sizeof(u32)), &last_bytes,
remainder);
}
if (!FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG))) {
dev_warn(rnandc->dev,
"Clearing residual data in the read FIFO\n");
rnandc_clear_fifo(rnandc);
}
} else if (rop.len && !rop.read) {
while (FIFO_STATE_W_FULL(readl(rnandc->regs + FIFO_STATE_REG)))
cpu_relax();
iowrite32_rep(rnandc->regs + FIFO_DATA_REG, rop.buf,
DIV_ROUND_UP(rop.len, 4));
if (remainder) {
last_bytes = 0;
memcpy(&last_bytes, rop.buf + (words * sizeof(u32)), remainder);
writel_relaxed(last_bytes, rnandc->regs + FIFO_DATA_REG);
}
while (!FIFO_STATE_W_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
cpu_relax();
}
ret = rnandc_wait_end_of_op(rnandc, chip);
if (ret)
return ret;
return 0;
}
static int rnandc_setup_interface(struct nand_chip *chip, int chipnr,
const struct nand_interface_config *conf)
{
struct rnand_chip *rnand = to_rnand(chip);
struct rnandc *rnandc = to_rnandc(chip->controller);
unsigned int period_ns = 1000000000 / rnandc->ext_clk_rate;
const struct nand_sdr_timings *sdr;
unsigned int cyc, cle, ale, bef_dly, ca_to_data;
sdr = nand_get_sdr_timings(conf);
if (IS_ERR(sdr))
return PTR_ERR(sdr);
if (sdr->tRP_min != sdr->tWP_min || sdr->tREH_min != sdr->tWH_min) {
dev_err(rnandc->dev, "Read and write hold times must be identical\n");
return -EINVAL;
}
if (chipnr < 0)
return 0;
rnand->timings_asyn =
TIMINGS_ASYN_TRWP(TO_CYCLES64(sdr->tRP_min, period_ns)) |
TIMINGS_ASYN_TRWH(TO_CYCLES64(sdr->tREH_min, period_ns));
rnand->tim_seq0 =
TIM_SEQ0_TCCS(TO_CYCLES64(sdr->tCCS_min, period_ns)) |
TIM_SEQ0_TADL(TO_CYCLES64(sdr->tADL_min, period_ns)) |
TIM_SEQ0_TRHW(TO_CYCLES64(sdr->tRHW_min, period_ns)) |
TIM_SEQ0_TWHR(TO_CYCLES64(sdr->tWHR_min, period_ns));
rnand->tim_seq1 =
TIM_SEQ1_TWB(TO_CYCLES64(sdr->tWB_max, period_ns)) |
TIM_SEQ1_TRR(TO_CYCLES64(sdr->tRR_min, period_ns)) |
TIM_SEQ1_TWW(TO_CYCLES64(sdr->tWW_min, period_ns));
cyc = sdr->tDS_min + sdr->tDH_min;
cle = sdr->tCLH_min + sdr->tCLS_min;
ale = sdr->tALH_min + sdr->tALS_min;
bef_dly = sdr->tWB_max - sdr->tDH_min;
ca_to_data = sdr->tWHR_min + sdr->tREA_max - sdr->tDH_min;
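/*
* cyc, cle and ale gather the data, command-latch and address-latch
* setup + hold requirements (in ps). In the formulas below, "1 cycle" is
* implemented as cyc (tDS + tDH), hence the "cle - cyc" and "ale - cyc"
* terms.
*/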
/*
* D0 = CMD -> ADDR = tCLH + tCLS - 1 cycle
* D1 = CMD -> CMD = tCLH + tCLS - 1 cycle
* D2 = CMD -> DLY = tWB - tDH
* D3 = CMD -> DATA = tWHR + tREA - tDH
*/
rnand->tim_gen_seq0 =
TIM_GEN_SEQ0_D0(TO_CYCLES64(cle - cyc, period_ns)) |
TIM_GEN_SEQ0_D1(TO_CYCLES64(cle - cyc, period_ns)) |
TIM_GEN_SEQ0_D2(TO_CYCLES64(bef_dly, period_ns)) |
TIM_GEN_SEQ0_D3(TO_CYCLES64(ca_to_data, period_ns));
/*
* D4 = ADDR -> CMD = tALH + tALS - 1 cycle
* D5 = ADDR -> ADDR = tALH + tALS - 1 cycle
* D6 = ADDR -> DLY = tWB - tDH
* D7 = ADDR -> DATA = tWHR + tREA - tDH
*/
rnand->tim_gen_seq1 =
TIM_GEN_SEQ1_D4(TO_CYCLES64(ale - cyc, period_ns)) |
TIM_GEN_SEQ1_D5(TO_CYCLES64(ale - cyc, period_ns)) |
TIM_GEN_SEQ1_D6(TO_CYCLES64(bef_dly, period_ns)) |
TIM_GEN_SEQ1_D7(TO_CYCLES64(ca_to_data, period_ns));
/*
* D8 = DLY -> DATA = tRR + tREA
* D9 = DLY -> CMD = tRR
* D10 = DATA -> CMD = tCLH + tCLS - 1 cycle
* D11 = DATA -> DLY = tWB - tDH
*/
rnand->tim_gen_seq2 =
TIM_GEN_SEQ2_D8(TO_CYCLES64(sdr->tRR_min + sdr->tREA_max, period_ns)) |
TIM_GEN_SEQ2_D9(TO_CYCLES64(sdr->tRR_min, period_ns)) |
TIM_GEN_SEQ2_D10(TO_CYCLES64(cle - cyc, period_ns)) |
TIM_GEN_SEQ2_D11(TO_CYCLES64(bef_dly, period_ns));
/* D12 = DATA -> END = tCLH - tDH */
rnand->tim_gen_seq3 =
TIM_GEN_SEQ3_D12(TO_CYCLES64(sdr->tCLH_min - sdr->tDH_min, period_ns));
return 0;
}
static int rnandc_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
unsigned int eccbytes = round_up(chip->ecc.bytes, 4) * chip->ecc.steps;
if (section)
return -ERANGE;
oobregion->offset = 2;
oobregion->length = eccbytes;
return 0;
}
static int rnandc_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
unsigned int eccbytes = round_up(chip->ecc.bytes, 4) * chip->ecc.steps;
if (section)
return -ERANGE;
oobregion->offset = 2 + eccbytes;
oobregion->length = mtd->oobsize - oobregion->offset;
return 0;
}
static const struct mtd_ooblayout_ops rnandc_ooblayout_ops = {
.ecc = rnandc_ooblayout_ecc,
.free = rnandc_ooblayout_free,
};
static int rnandc_hw_ecc_controller_init(struct nand_chip *chip)
{
struct rnand_chip *rnand = to_rnand(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
struct rnandc *rnandc = to_rnandc(chip->controller);
if (mtd->writesize > SZ_16K) {
dev_err(rnandc->dev, "Unsupported page size\n");
return -EINVAL;
}
switch (chip->ecc.size) {
case SZ_256:
rnand->control |= CONTROL_ECC_BLOCK_SIZE_256;
break;
case SZ_512:
rnand->control |= CONTROL_ECC_BLOCK_SIZE_512;
break;
case SZ_1K:
rnand->control |= CONTROL_ECC_BLOCK_SIZE_1024;
break;
default:
dev_err(rnandc->dev, "Unsupported ECC chunk size\n");
return -EINVAL;
}
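/*
* Each supported strength consumes 14 bits of ECC per bit of correction
* capability; chip->ecc.bytes is that total rounded up to whole bytes.
*/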
switch (chip->ecc.strength) {
case 2:
chip->ecc.bytes = 4;
rnand->ecc_ctrl |= ECC_CTRL_CAP_2B;
break;
case 4:
chip->ecc.bytes = 7;
rnand->ecc_ctrl |= ECC_CTRL_CAP_4B;
break;
case 8:
chip->ecc.bytes = 14;
rnand->ecc_ctrl |= ECC_CTRL_CAP_8B;
break;
case 16:
chip->ecc.bytes = 28;
rnand->ecc_ctrl |= ECC_CTRL_CAP_16B;
break;
case 24:
chip->ecc.bytes = 42;
rnand->ecc_ctrl |= ECC_CTRL_CAP_24B;
break;
case 32:
chip->ecc.bytes = 56;
rnand->ecc_ctrl |= ECC_CTRL_CAP_32B;
break;
default:
dev_err(rnandc->dev, "Unsupported ECC strength\n");
return -EINVAL;
}
rnand->ecc_ctrl |= ECC_CTRL_ERR_THRESHOLD(chip->ecc.strength);
mtd_set_ooblayout(mtd, &rnandc_ooblayout_ops);
chip->ecc.steps = mtd->writesize / chip->ecc.size;
chip->ecc.read_page = rnandc_read_page_hw_ecc;
chip->ecc.read_subpage = rnandc_read_subpage_hw_ecc;
chip->ecc.write_page = rnandc_write_page_hw_ecc;
chip->ecc.write_subpage = rnandc_write_subpage_hw_ecc;
return 0;
}
static int rnandc_ecc_init(struct nand_chip *chip)
{
struct nand_ecc_ctrl *ecc = &chip->ecc;
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(&chip->base);
struct rnandc *rnandc = to_rnandc(chip->controller);
int ret;
if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
(!ecc->size || !ecc->strength)) {
if (requirements->step_size && requirements->strength) {
ecc->size = requirements->step_size;
ecc->strength = requirements->strength;
} else {
dev_err(rnandc->dev, "No minimum ECC strength\n");
return -EINVAL;
}
}
switch (ecc->engine_type) {
case NAND_ECC_ENGINE_TYPE_ON_HOST:
ret = rnandc_hw_ecc_controller_init(chip);
if (ret)
return ret;
break;
case NAND_ECC_ENGINE_TYPE_NONE:
case NAND_ECC_ENGINE_TYPE_SOFT:
case NAND_ECC_ENGINE_TYPE_ON_DIE:
break;
default:
return -EINVAL;
}
return 0;
}
static int rnandc_attach_chip(struct nand_chip *chip)
{
struct rnand_chip *rnand = to_rnand(chip);
struct rnandc *rnandc = to_rnandc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg = nanddev_get_memorg(&chip->base);
int ret;
/* Do not store BBT bits in the OOB section as it is not protected */
if (chip->bbt_options & NAND_BBT_USE_FLASH)
chip->bbt_options |= NAND_BBT_NO_OOB;
if (mtd->writesize <= 512) {
dev_err(rnandc->dev, "Small page devices not supported\n");
return -EINVAL;
}
rnand->control |= CONTROL_CHECK_RB_LINE | CONTROL_INT_EN;
switch (memorg->pages_per_eraseblock) {
case 32:
rnand->control |= CONTROL_BLOCK_SIZE_32P;
break;
case 64:
rnand->control |= CONTROL_BLOCK_SIZE_64P;
break;
case 128:
rnand->control |= CONTROL_BLOCK_SIZE_128P;
break;
case 256:
rnand->control |= CONTROL_BLOCK_SIZE_256P;
break;
default:
dev_err(rnandc->dev, "Unsupported memory organization\n");
return -EINVAL;
}
chip->options |= NAND_SUBPAGE_READ;
ret = rnandc_ecc_init(chip);
if (ret) {
dev_err(rnandc->dev, "ECC initialization failed (%d)\n", ret);
return ret;
}
/* Force an update of the configuration registers */
rnand->selected_die = -1;
return 0;
}
static const struct nand_controller_ops rnandc_ops = {
.attach_chip = rnandc_attach_chip,
.exec_op = rnandc_exec_op,
.setup_interface = rnandc_setup_interface,
};
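/*
* A single bounce buffer is shared by every chip on the controller. It is
* (re)allocated whenever a newly attached chip needs more room than the
* current buffer offers, i.e. it always covers the largest page + OOB size.
*/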
static int rnandc_alloc_dma_buf(struct rnandc *rnandc,
struct mtd_info *new_mtd)
{
unsigned int max_len = new_mtd->writesize + new_mtd->oobsize;
struct rnand_chip *entry, *temp;
struct nand_chip *chip;
struct mtd_info *mtd;
list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
chip = &entry->chip;
mtd = nand_to_mtd(chip);
max_len = max(max_len, mtd->writesize + mtd->oobsize);
}
if (rnandc->buf && rnandc->buf_sz < max_len) {
devm_kfree(rnandc->dev, rnandc->buf);
rnandc->buf = NULL;
}
if (!rnandc->buf) {
rnandc->buf_sz = max_len;
rnandc->buf = devm_kmalloc(rnandc->dev, max_len,
GFP_KERNEL | GFP_DMA);
if (!rnandc->buf)
return -ENOMEM;
}
return 0;
}
static int rnandc_chip_init(struct rnandc *rnandc, struct device_node *np)
{
struct rnand_chip *rnand;
struct mtd_info *mtd;
struct nand_chip *chip;
int nsels, ret, i;
u32 cs;
nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
if (nsels <= 0) {
ret = (nsels < 0) ? nsels : -EINVAL;
dev_err(rnandc->dev, "Invalid reg property (%d)\n", ret);
return ret;
}
/* Alloc the driver's NAND chip structure */
rnand = devm_kzalloc(rnandc->dev, struct_size(rnand, sels, nsels),
GFP_KERNEL);
if (!rnand)
return -ENOMEM;
rnand->nsels = nsels;
rnand->selected_die = -1;
for (i = 0; i < nsels; i++) {
ret = of_property_read_u32_index(np, "reg", i, &cs);
if (ret) {
dev_err(rnandc->dev, "Incomplete reg property (%d)\n", ret);
return ret;
}
if (cs >= RNANDC_CS_NUM) {
dev_err(rnandc->dev, "Invalid reg property (%d)\n", cs);
return -EINVAL;
}
if (test_and_set_bit(cs, &rnandc->assigned_cs)) {
dev_err(rnandc->dev, "CS %d already assigned\n", cs);
return -EINVAL;
}
/*
* No need to check for RB or WP properties: there is a mandatory
* 1:1 mapping with the CS.
*/
rnand->sels[i].cs = cs;
}
chip = &rnand->chip;
chip->controller = &rnandc->controller;
nand_set_flash_node(chip, np);
mtd = nand_to_mtd(chip);
mtd->dev.parent = rnandc->dev;
if (!mtd->name) {
dev_err(rnandc->dev, "Missing MTD label\n");
return -EINVAL;
}
ret = nand_scan(chip, rnand->nsels);
if (ret) {
dev_err(rnandc->dev, "Failed to scan the NAND chip (%d)\n", ret);
return ret;
}
ret = rnandc_alloc_dma_buf(rnandc, mtd);
if (ret)
goto cleanup_nand;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(rnandc->dev, "Failed to register MTD device (%d)\n", ret);
goto cleanup_nand;
}
list_add_tail(&rnand->node, &rnandc->chips);
return 0;
cleanup_nand:
nand_cleanup(chip);
return ret;
}
static void rnandc_chips_cleanup(struct rnandc *rnandc)
{
struct rnand_chip *entry, *temp;
struct nand_chip *chip;
int ret;
list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
chip = &entry->chip;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
list_del(&entry->node);
}
}
static int rnandc_chips_init(struct rnandc *rnandc)
{
struct device_node *np;
int ret;
for_each_child_of_node(rnandc->dev->of_node, np) {
ret = rnandc_chip_init(rnandc, np);
if (ret) {
of_node_put(np);
goto cleanup_chips;
}
}
return 0;
cleanup_chips:
rnandc_chips_cleanup(rnandc);
return ret;
}
static int rnandc_probe(struct platform_device *pdev)
{
struct rnandc *rnandc;
struct clk *eclk;
int irq, ret;
rnandc = devm_kzalloc(&pdev->dev, sizeof(*rnandc), GFP_KERNEL);
if (!rnandc)
return -ENOMEM;
rnandc->dev = &pdev->dev;
nand_controller_init(&rnandc->controller);
rnandc->controller.ops = &rnandc_ops;
INIT_LIST_HEAD(&rnandc->chips);
init_completion(&rnandc->complete);
rnandc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rnandc->regs))
return PTR_ERR(rnandc->regs);
devm_pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;
/* The external NAND bus clock rate is needed for computing timings */
eclk = clk_get(&pdev->dev, "eclk");
if (IS_ERR(eclk)) {
ret = PTR_ERR(eclk);
goto dis_runtime_pm;
}
rnandc->ext_clk_rate = clk_get_rate(eclk);
clk_put(eclk);
rnandc_dis_interrupts(rnandc);
irq = platform_get_irq_optional(pdev, 0);
if (irq == -EPROBE_DEFER) {
ret = irq;
goto dis_runtime_pm;
} else if (irq < 0) {
dev_info(&pdev->dev, "No IRQ found, fallback to polling\n");
rnandc->use_polling = true;
} else {
ret = devm_request_irq(&pdev->dev, irq, rnandc_irq_handler, 0,
"renesas-nand-controller", rnandc);
if (ret < 0)
goto dis_runtime_pm;
}
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
goto dis_runtime_pm;
rnandc_clear_fifo(rnandc);
platform_set_drvdata(pdev, rnandc);
ret = rnandc_chips_init(rnandc);
if (ret)
goto dis_runtime_pm;
return 0;
dis_runtime_pm:
pm_runtime_put(&pdev->dev);
return ret;
}
static void rnandc_remove(struct platform_device *pdev)
{
struct rnandc *rnandc = platform_get_drvdata(pdev);
rnandc_chips_cleanup(rnandc);
pm_runtime_put(&pdev->dev);
}
static const struct of_device_id rnandc_id_table[] = {
{ .compatible = "renesas,rcar-gen3-nandc" },
{ .compatible = "renesas,rzn1-nandc" },
{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, rnandc_id_table);
static struct platform_driver rnandc_driver = {
.driver = {
.name = "renesas-nandc",
.of_match_table = rnandc_id_table,
},
.probe = rnandc_probe,
.remove_new = rnandc_remove,
};
module_platform_driver(rnandc_driver);
MODULE_AUTHOR("Miquel Raynal <[email protected]>");
MODULE_DESCRIPTION("Renesas R-Car Gen3 & RZ/N1 NAND controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mtd/nand/raw/renesas-nand-controller.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for One Laptop Per Child ‘CAFÉ’ controller, aka Marvell 88ALP01
*
* The data sheet for this device can be found at:
* http://wiki.laptop.org/go/Datasheets
*
* Copyright © 2006 Red Hat, Inc.
* Copyright © 2006 David Woodhouse <[email protected]>
*/
#define DEBUG
#include <linux/device.h>
#undef DEBUG
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/rslib.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/io.h>
#define CAFE_NAND_CTRL1 0x00
#define CAFE_NAND_CTRL2 0x04
#define CAFE_NAND_CTRL3 0x08
#define CAFE_NAND_STATUS 0x0c
#define CAFE_NAND_IRQ 0x10
#define CAFE_NAND_IRQ_MASK 0x14
#define CAFE_NAND_DATA_LEN 0x18
#define CAFE_NAND_ADDR1 0x1c
#define CAFE_NAND_ADDR2 0x20
#define CAFE_NAND_TIMING1 0x24
#define CAFE_NAND_TIMING2 0x28
#define CAFE_NAND_TIMING3 0x2c
#define CAFE_NAND_NONMEM 0x30
#define CAFE_NAND_ECC_RESULT 0x3C
#define CAFE_NAND_DMA_CTRL 0x40
#define CAFE_NAND_DMA_ADDR0 0x44
#define CAFE_NAND_DMA_ADDR1 0x48
#define CAFE_NAND_ECC_SYN01 0x50
#define CAFE_NAND_ECC_SYN23 0x54
#define CAFE_NAND_ECC_SYN45 0x58
#define CAFE_NAND_ECC_SYN67 0x5c
#define CAFE_NAND_READ_DATA 0x1000
#define CAFE_NAND_WRITE_DATA 0x2000
#define CAFE_GLOBAL_CTRL 0x3004
#define CAFE_GLOBAL_IRQ 0x3008
#define CAFE_GLOBAL_IRQ_MASK 0x300c
#define CAFE_NAND_RESET 0x3034
/* Missing from the datasheet: bit 19 of CTRL1 sets CE0 vs. CE1 */
#define CTRL1_CHIPSELECT (1<<19)
struct cafe_priv {
struct nand_chip nand;
struct pci_dev *pdev;
void __iomem *mmio;
struct rs_control *rs;
uint32_t ctl1;
uint32_t ctl2;
int datalen;
int nr_data;
int data_pos;
int page_addr;
bool usedma;
dma_addr_t dmaaddr;
unsigned char *dmabuf;
};
static int usedma = 1;
module_param(usedma, int, 0644);
static int skipbbt = 0;
module_param(skipbbt, int, 0644);
static int debug = 0;
module_param(debug, int, 0644);
static int regdebug = 0;
module_param(regdebug, int, 0644);
static int checkecc = 1;
module_param(checkecc, int, 0644);
static unsigned int numtimings;
static int timing[3];
module_param_array(timing, int, &numtimings, 0644);
static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
/* Hrm. Why isn't this already conditional on something in the struct device? */
#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
/* Make it easier to switch to PIO if we need to */
#define cafe_readl(cafe, addr) readl((cafe)->mmio + CAFE_##addr)
#define cafe_writel(cafe, datum, addr) writel(datum, (cafe)->mmio + CAFE_##addr)
static int cafe_device_ready(struct nand_chip *chip)
{
struct cafe_priv *cafe = nand_get_controller_data(chip);
int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
cafe_writel(cafe, irqs, NAND_IRQ);
cafe_dev_dbg(&cafe->pdev->dev, "NAND device is%s ready, IRQ %x (%x) (%x,%x)\n",
result?"":" not", irqs, cafe_readl(cafe, NAND_IRQ),
cafe_readl(cafe, GLOBAL_IRQ), cafe_readl(cafe, GLOBAL_IRQ_MASK));
return result;
}
static void cafe_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
struct cafe_priv *cafe = nand_get_controller_data(chip);
if (cafe->usedma)
memcpy(cafe->dmabuf + cafe->datalen, buf, len);
else
memcpy_toio(cafe->mmio + CAFE_NAND_WRITE_DATA + cafe->datalen, buf, len);
cafe->datalen += len;
cafe_dev_dbg(&cafe->pdev->dev, "Copy 0x%x bytes to write buffer. datalen 0x%x\n",
len, cafe->datalen);
}
static void cafe_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
struct cafe_priv *cafe = nand_get_controller_data(chip);
if (cafe->usedma)
memcpy(buf, cafe->dmabuf + cafe->datalen, len);
else
memcpy_fromio(buf, cafe->mmio + CAFE_NAND_READ_DATA + cafe->datalen, len);
cafe_dev_dbg(&cafe->pdev->dev, "Copy 0x%x bytes from position 0x%x in read buffer.\n",
len, cafe->datalen);
cafe->datalen += len;
}
static uint8_t cafe_read_byte(struct nand_chip *chip)
{
struct cafe_priv *cafe = nand_get_controller_data(chip);
uint8_t d;
cafe_read_buf(chip, &d, 1);
cafe_dev_dbg(&cafe->pdev->dev, "Read %02x\n", d);
return d;
}
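/*
* Legacy ->cmdfunc() implementation: single commands are issued directly
* through CTRL1/CTRL2, while the first half of a two-part command
* (SEQIN/PAGEPROG, ERASE1/ERASE2) only records ctl1 and the transfer is
* actually started when the second half arrives.
*/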
static void cafe_nand_cmdfunc(struct nand_chip *chip, unsigned command,
int column, int page_addr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct cafe_priv *cafe = nand_get_controller_data(chip);
int adrbytes = 0;
uint32_t ctl1;
uint32_t doneint = 0x80000000;
cafe_dev_dbg(&cafe->pdev->dev, "cmdfunc %02x, 0x%x, 0x%x\n",
command, column, page_addr);
if (command == NAND_CMD_ERASE2 || command == NAND_CMD_PAGEPROG) {
/* Second half of a command we already calculated */
cafe_writel(cafe, cafe->ctl2 | 0x100 | command, NAND_CTRL2);
ctl1 = cafe->ctl1;
cafe->ctl2 &= ~(1<<30);
cafe_dev_dbg(&cafe->pdev->dev, "Continue command, ctl1 %08x, #data %d\n",
cafe->ctl1, cafe->nr_data);
goto do_command;
}
/* Reset ECC engine */
cafe_writel(cafe, 0, NAND_CTRL2);
/* Emulate NAND_CMD_READOOB on large-page chips */
if (mtd->writesize > 512 &&
command == NAND_CMD_READOOB) {
column += mtd->writesize;
command = NAND_CMD_READ0;
}
/* FIXME: Do we need to send read command before sending data
for small-page chips, to position the buffer correctly? */
if (column != -1) {
cafe_writel(cafe, column, NAND_ADDR1);
adrbytes = 2;
if (page_addr != -1)
goto write_adr2;
} else if (page_addr != -1) {
cafe_writel(cafe, page_addr & 0xffff, NAND_ADDR1);
page_addr >>= 16;
write_adr2:
cafe_writel(cafe, page_addr, NAND_ADDR2);
adrbytes += 2;
if (mtd->size > mtd->writesize << 16)
adrbytes++;
}
cafe->data_pos = cafe->datalen = 0;
/* Set command valid bit, mask in the chip select bit */
ctl1 = 0x80000000 | command | (cafe->ctl1 & CTRL1_CHIPSELECT);
/* Set RD or WR bits as appropriate */
if (command == NAND_CMD_READID || command == NAND_CMD_STATUS) {
ctl1 |= (1<<26); /* rd */
/* Always 4 bytes, for now */
cafe->datalen = 4;
/* And one address cycle -- even for STATUS, since the controller doesn't work without */
adrbytes = 1;
} else if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
command == NAND_CMD_READOOB || command == NAND_CMD_RNDOUT) {
ctl1 |= 1<<26; /* rd */
/* For now, assume just read to end of page */
cafe->datalen = mtd->writesize + mtd->oobsize - column;
} else if (command == NAND_CMD_SEQIN)
ctl1 |= 1<<25; /* wr */
/* Set number of address bytes */
if (adrbytes)
ctl1 |= ((adrbytes-1)|8) << 27;
if (command == NAND_CMD_SEQIN || command == NAND_CMD_ERASE1) {
/* Ignore the first command of a pair; the hardware
deals with them both at once, later */
cafe->ctl1 = ctl1;
cafe_dev_dbg(&cafe->pdev->dev, "Setup for delayed command, ctl1 %08x, dlen %x\n",
cafe->ctl1, cafe->datalen);
return;
}
/* RNDOUT and READ0 commands need a following byte */
if (command == NAND_CMD_RNDOUT)
cafe_writel(cafe, cafe->ctl2 | 0x100 | NAND_CMD_RNDOUTSTART, NAND_CTRL2);
else if (command == NAND_CMD_READ0 && mtd->writesize > 512)
cafe_writel(cafe, cafe->ctl2 | 0x100 | NAND_CMD_READSTART, NAND_CTRL2);
do_command:
cafe_dev_dbg(&cafe->pdev->dev, "dlen %x, ctl1 %x, ctl2 %x\n",
cafe->datalen, ctl1, cafe_readl(cafe, NAND_CTRL2));
/* NB: The datasheet lies -- we really should be subtracting 1 here */
cafe_writel(cafe, cafe->datalen, NAND_DATA_LEN);
cafe_writel(cafe, 0x90000000, NAND_IRQ);
if (cafe->usedma && (ctl1 & (3<<25))) {
uint32_t dmactl = 0xc0000000 + cafe->datalen;
/* If WR or RD bits set, set up DMA */
if (ctl1 & (1<<26)) {
/* It's a read */
dmactl |= (1<<29);
/* ... so it's done when the DMA is done, not just
the command. */
doneint = 0x10000000;
}
cafe_writel(cafe, dmactl, NAND_DMA_CTRL);
}
cafe->datalen = 0;
if (unlikely(regdebug)) {
int i;
printk("About to write command %08x to register 0\n", ctl1);
for (i=4; i< 0x5c; i+=4)
printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
}
cafe_writel(cafe, ctl1, NAND_CTRL1);
/* Always apply this short delay to make sure tWB is honoured in
* any case, on any machine. */
ndelay(100);
if (1) {
int c;
uint32_t irqs;
for (c = 500000; c != 0; c--) {
irqs = cafe_readl(cafe, NAND_IRQ);
if (irqs & doneint)
break;
udelay(1);
if (!(c % 100000))
cafe_dev_dbg(&cafe->pdev->dev, "Wait for ready, IRQ %x\n", irqs);
cpu_relax();
}
cafe_writel(cafe, doneint, NAND_IRQ);
cafe_dev_dbg(&cafe->pdev->dev, "Command %x completed after %d usec, irqs %x (%x)\n",
command, 500000-c, irqs, cafe_readl(cafe, NAND_IRQ));
}
WARN_ON(cafe->ctl2 & (1<<30));
switch (command) {
case NAND_CMD_CACHEDPROG:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
case NAND_CMD_SEQIN:
case NAND_CMD_RNDIN:
case NAND_CMD_STATUS:
case NAND_CMD_RNDOUT:
cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
return;
}
nand_wait_ready(chip);
cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
}
static void cafe_select_chip(struct nand_chip *chip, int chipnr)
{
struct cafe_priv *cafe = nand_get_controller_data(chip);
cafe_dev_dbg(&cafe->pdev->dev, "select_chip %d\n", chipnr);
/* Mask the appropriate bit into the stored value of ctl1
which will be used by cafe_nand_cmdfunc() */
if (chipnr)
cafe->ctl1 |= CTRL1_CHIPSELECT;
else
cafe->ctl1 &= ~CTRL1_CHIPSELECT;
}
static irqreturn_t cafe_nand_interrupt(int irq, void *id)
{
struct mtd_info *mtd = id;
struct nand_chip *chip = mtd_to_nand(mtd);
struct cafe_priv *cafe = nand_get_controller_data(chip);
uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
cafe_writel(cafe, irqs & ~0x90000000, NAND_IRQ);
if (!irqs)
return IRQ_NONE;
cafe_dev_dbg(&cafe->pdev->dev, "irq, bits %x (%x)\n", irqs, cafe_readl(cafe, NAND_IRQ));
return IRQ_HANDLED;
}
static int cafe_nand_write_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
mtd->oobsize);
}
/* Don't use -- use nand_read_oob_std for now */
static int cafe_nand_read_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/**
* cafe_nand_read_page - [REPLACEABLE] hardware ecc syndrome based page read
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller expects OOB data read to chip->oob_poi
* @page: page number to read
*
* The hw generator calculates the error syndrome automatically. Therefore
* we need a special oob layout and handling.
*/
static int cafe_nand_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct cafe_priv *cafe = nand_get_controller_data(chip);
unsigned int max_bitflips = 0;
cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n",
cafe_readl(cafe, NAND_ECC_RESULT),
cafe_readl(cafe, NAND_ECC_SYN01));
nand_read_page_op(chip, page, 0, buf, mtd->writesize);
chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
unsigned short syn[8], pat[4];
int pos[4];
u8 *oob = chip->oob_poi;
int i, n;
for (i=0; i<8; i+=2) {
uint32_t tmp = cafe_readl(cafe, NAND_ECC_SYN01 + (i*2));
syn[i] = cafe->rs->codec->index_of[tmp & 0xfff];
syn[i+1] = cafe->rs->codec->index_of[(tmp >> 16) & 0xfff];
}
n = decode_rs16(cafe->rs, NULL, NULL, 1367, syn, 0, pos, 0,
pat);
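/*
* decode_rs16() reports error locations as 12-bit symbol indices. Two
* consecutive symbols pack into three bytes, so each position is converted
* to byte offsets (3*p/2) and the correction pattern is XORed into the
* affected nibbles of the data buffer or, beyond symbol 1365, the OOB area.
*/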
for (i = 0; i < n; i++) {
int p = pos[i];
/* The 12-bit symbols are mapped to bytes here */
if (p > 1374) {
/* out of range */
n = -1374;
} else if (p == 0) {
/* high four bits do not correspond to data */
if (pat[i] > 0xff)
n = -2048;
else
buf[0] ^= pat[i];
} else if (p == 1365) {
buf[2047] ^= pat[i] >> 4;
oob[0] ^= pat[i] << 4;
} else if (p > 1365) {
if ((p & 1) == 1) {
oob[3*p/2 - 2048] ^= pat[i] >> 4;
oob[3*p/2 - 2047] ^= pat[i] << 4;
} else {
oob[3*p/2 - 2049] ^= pat[i] >> 8;
oob[3*p/2 - 2048] ^= pat[i];
}
} else if ((p & 1) == 1) {
buf[3*p/2] ^= pat[i] >> 4;
buf[3*p/2 + 1] ^= pat[i] << 4;
} else {
buf[3*p/2 - 1] ^= pat[i] >> 8;
buf[3*p/2] ^= pat[i];
}
}
if (n < 0) {
dev_dbg(&cafe->pdev->dev, "Failed to correct ECC at %08x\n",
cafe_readl(cafe, NAND_ADDR2) * 2048);
for (i = 0; i < 0x5c; i += 4)
printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
mtd->ecc_stats.failed++;
} else {
dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
mtd->ecc_stats.corrected += n;
max_bitflips = max_t(unsigned int, max_bitflips, n);
}
}
return max_bitflips;
}
static int cafe_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (section)
return -ERANGE;
oobregion->offset = 0;
oobregion->length = chip->ecc.total;
return 0;
}
static int cafe_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (section)
return -ERANGE;
oobregion->offset = chip->ecc.total;
oobregion->length = mtd->oobsize - chip->ecc.total;
return 0;
}
static const struct mtd_ooblayout_ops cafe_ooblayout_ops = {
.ecc = cafe_ooblayout_ecc,
.free = cafe_ooblayout_free,
};
/* Ick. The BBT code really ought to be able to work this bit out
for itself from the above, at least for the 2KiB case */
static uint8_t cafe_bbt_pattern_2048[] = { 'B', 'b', 't', '0' };
static uint8_t cafe_mirror_pattern_2048[] = { '1', 't', 'b', 'B' };
static uint8_t cafe_bbt_pattern_512[] = { 0xBB };
static uint8_t cafe_mirror_pattern_512[] = { 0xBC };
static struct nand_bbt_descr cafe_bbt_main_descr_2048 = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION,
.offs = 14,
.len = 4,
.veroffs = 18,
.maxblocks = 4,
.pattern = cafe_bbt_pattern_2048
};
static struct nand_bbt_descr cafe_bbt_mirror_descr_2048 = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION,
.offs = 14,
.len = 4,
.veroffs = 18,
.maxblocks = 4,
.pattern = cafe_mirror_pattern_2048
};
static struct nand_bbt_descr cafe_bbt_main_descr_512 = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION,
.offs = 14,
.len = 1,
.veroffs = 15,
.maxblocks = 4,
.pattern = cafe_bbt_pattern_512
};
static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION,
.offs = 14,
.len = 1,
.veroffs = 15,
.maxblocks = 4,
.pattern = cafe_mirror_pattern_512
};
static int cafe_nand_write_page_lowlevel(struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct cafe_priv *cafe = nand_get_controller_data(chip);
nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
/* Set up ECC autogeneration */
cafe->ctl2 |= (1<<30);
return nand_prog_page_end_op(chip);
}
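/*
* Galois-field helpers for the Reed-Solomon ECC engine: GF(4096) is built as
* a quadratic extension of GF(64). gf64_mul() multiplies in
* GF(64) = F_2[X]/(X^6+X+1), gf4096_mul() combines the GF(64) halves of its
* operands, and cafe_mul() is the generator function handed to
* init_rs_non_canonical() so rslib can build its lookup tables for this
* non-canonical field representation.
*/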
/* F_2[X]/(X**6+X+1) */
static unsigned short gf64_mul(u8 a, u8 b)
{
u8 c;
unsigned int i;
c = 0;
for (i = 0; i < 6; i++) {
if (a & 1)
c ^= b;
a >>= 1;
b <<= 1;
if ((b & 0x40) != 0)
b ^= 0x43;
}
return c;
}
/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X] */
static u16 gf4096_mul(u16 a, u16 b)
{
u8 ah, al, bh, bl, ch, cl;
ah = a >> 6;
al = a & 0x3f;
bh = b >> 6;
bl = b & 0x3f;
ch = gf64_mul(ah ^ al, bh ^ bl) ^ gf64_mul(al, bl);
cl = gf64_mul(gf64_mul(ah, bh), 0x21) ^ gf64_mul(al, bl);
return (ch << 6) ^ cl;
}
static int cafe_mul(int x)
{
if (x == 0)
return 1;
return gf4096_mul(x, 0xe01);
}
static int cafe_nand_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct cafe_priv *cafe = nand_get_controller_data(chip);
int err = 0;
cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112,
&cafe->dmaaddr, GFP_KERNEL);
if (!cafe->dmabuf)
return -ENOMEM;
/* Set up DMA address */
cafe_writel(cafe, lower_32_bits(cafe->dmaaddr), NAND_DMA_ADDR0);
cafe_writel(cafe, upper_32_bits(cafe->dmaaddr), NAND_DMA_ADDR1);
cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
/* Restore the DMA flag */
cafe->usedma = usedma;
cafe->ctl2 = BIT(27); /* Reed-Solomon ECC */
if (mtd->writesize == 2048)
cafe->ctl2 |= BIT(29); /* 2KiB page size */
/* Set up ECC according to the type of chip we found */
mtd_set_ooblayout(mtd, &cafe_ooblayout_ops);
if (mtd->writesize == 2048) {
cafe->nand.bbt_td = &cafe_bbt_main_descr_2048;
cafe->nand.bbt_md = &cafe_bbt_mirror_descr_2048;
} else if (mtd->writesize == 512) {
cafe->nand.bbt_td = &cafe_bbt_main_descr_512;
cafe->nand.bbt_md = &cafe_bbt_mirror_descr_512;
} else {
dev_warn(&cafe->pdev->dev,
"Unexpected NAND flash writesize %d. Aborting\n",
mtd->writesize);
err = -ENOTSUPP;
goto out_free_dma;
}
cafe->nand.ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
cafe->nand.ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
cafe->nand.ecc.size = mtd->writesize;
cafe->nand.ecc.bytes = 14;
cafe->nand.ecc.strength = 4;
cafe->nand.ecc.write_page = cafe_nand_write_page_lowlevel;
cafe->nand.ecc.write_oob = cafe_nand_write_oob;
cafe->nand.ecc.read_page = cafe_nand_read_page;
cafe->nand.ecc.read_oob = cafe_nand_read_oob;
return 0;
out_free_dma:
dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
return err;
}
static void cafe_nand_detach_chip(struct nand_chip *chip)
{
struct cafe_priv *cafe = nand_get_controller_data(chip);
dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
}
static const struct nand_controller_ops cafe_nand_controller_ops = {
.attach_chip = cafe_nand_attach_chip,
.detach_chip = cafe_nand_detach_chip,
};
static int cafe_nand_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct mtd_info *mtd;
struct cafe_priv *cafe;
uint32_t ctrl;
int err = 0;
/* Very old versions shared the same PCI ident for all three
functions on the chip. Verify the class too... */
if ((pdev->class >> 8) != PCI_CLASS_MEMORY_FLASH)
return -ENODEV;
err = pci_enable_device(pdev);
if (err)
return err;
pci_set_master(pdev);
cafe = kzalloc(sizeof(*cafe), GFP_KERNEL);
if (!cafe) {
err = -ENOMEM;
goto out_disable_device;
}
mtd = nand_to_mtd(&cafe->nand);
mtd->dev.parent = &pdev->dev;
nand_set_controller_data(&cafe->nand, cafe);
cafe->pdev = pdev;
cafe->mmio = pci_iomap(pdev, 0, 0);
if (!cafe->mmio) {
dev_warn(&pdev->dev, "failed to iomap\n");
err = -ENOMEM;
goto out_free_mtd;
}
cafe->rs = init_rs_non_canonical(12, &cafe_mul, 0, 1, 8);
if (!cafe->rs) {
err = -ENOMEM;
goto out_ior;
}
cafe->nand.legacy.cmdfunc = cafe_nand_cmdfunc;
cafe->nand.legacy.dev_ready = cafe_device_ready;
cafe->nand.legacy.read_byte = cafe_read_byte;
cafe->nand.legacy.read_buf = cafe_read_buf;
cafe->nand.legacy.write_buf = cafe_write_buf;
cafe->nand.legacy.select_chip = cafe_select_chip;
cafe->nand.legacy.set_features = nand_get_set_features_notsupp;
cafe->nand.legacy.get_features = nand_get_set_features_notsupp;
cafe->nand.legacy.chip_delay = 0;
/* Enable the following for a flash based bad block table */
cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
if (skipbbt)
cafe->nand.options |= NAND_SKIP_BBTSCAN | NAND_NO_BBM_QUIRK;
if (numtimings && numtimings != 3) {
dev_warn(&cafe->pdev->dev, "%d timing register values ignored; precisely three are required\n", numtimings);
}
if (numtimings == 3) {
cafe_dev_dbg(&cafe->pdev->dev, "Using provided timings (%08x %08x %08x)\n",
timing[0], timing[1], timing[2]);
} else {
timing[0] = cafe_readl(cafe, NAND_TIMING1);
timing[1] = cafe_readl(cafe, NAND_TIMING2);
timing[2] = cafe_readl(cafe, NAND_TIMING3);
if (timing[0] | timing[1] | timing[2]) {
cafe_dev_dbg(&cafe->pdev->dev, "Timing registers already set (%08x %08x %08x)\n",
timing[0], timing[1], timing[2]);
} else {
dev_warn(&cafe->pdev->dev, "Timing registers unset; using most conservative defaults\n");
timing[0] = timing[1] = timing[2] = 0xffffffff;
}
}
/* Start off by resetting the NAND controller completely */
cafe_writel(cafe, 1, NAND_RESET);
cafe_writel(cafe, 0, NAND_RESET);
cafe_writel(cafe, timing[0], NAND_TIMING1);
cafe_writel(cafe, timing[1], NAND_TIMING2);
cafe_writel(cafe, timing[2], NAND_TIMING3);
cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
err = request_irq(pdev->irq, &cafe_nand_interrupt, IRQF_SHARED,
"CAFE NAND", mtd);
if (err) {
dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
goto out_free_rs;
}
/* Disable master reset, enable NAND clock */
ctrl = cafe_readl(cafe, GLOBAL_CTRL);
ctrl &= 0xffffeff0;
ctrl |= 0x00007000;
cafe_writel(cafe, ctrl | 0x05, GLOBAL_CTRL);
cafe_writel(cafe, ctrl | 0x0a, GLOBAL_CTRL);
cafe_writel(cafe, 0, NAND_DMA_CTRL);
cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
/* Enable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
cafe_dev_dbg(&cafe->pdev->dev, "Control %x, IRQ mask %x\n",
cafe_readl(cafe, GLOBAL_CTRL),
cafe_readl(cafe, GLOBAL_IRQ_MASK));
/* Do not use the DMA during the NAND identification */
cafe->usedma = 0;
/* Scan to find existence of the device */
cafe->nand.legacy.dummy_controller.ops = &cafe_nand_controller_ops;
err = nand_scan(&cafe->nand, 2);
if (err)
goto out_irq;
pci_set_drvdata(pdev, mtd);
mtd->name = "cafe_nand";
err = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
if (err)
goto out_cleanup_nand;
goto out;
out_cleanup_nand:
nand_cleanup(&cafe->nand);
out_irq:
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
free_irq(pdev->irq, mtd);
out_free_rs:
free_rs(cafe->rs);
out_ior:
pci_iounmap(pdev, cafe->mmio);
out_free_mtd:
kfree(cafe);
out_disable_device:
pci_disable_device(pdev);
out:
return err;
}
static void cafe_nand_remove(struct pci_dev *pdev)
{
struct mtd_info *mtd = pci_get_drvdata(pdev);
struct nand_chip *chip = mtd_to_nand(mtd);
struct cafe_priv *cafe = nand_get_controller_data(chip);
int ret;
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
free_irq(pdev->irq, mtd);
ret = mtd_device_unregister(mtd);
WARN_ON(ret);
nand_cleanup(chip);
free_rs(cafe->rs);
pci_iounmap(pdev, cafe->mmio);
dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
kfree(cafe);
pci_disable_device(pdev);
}
static const struct pci_device_id cafe_nand_tbl[] = {
{ PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND,
PCI_ANY_ID, PCI_ANY_ID },
{ }
};
MODULE_DEVICE_TABLE(pci, cafe_nand_tbl);
static int cafe_nand_resume(struct pci_dev *pdev)
{
uint32_t ctrl;
struct mtd_info *mtd = pci_get_drvdata(pdev);
struct nand_chip *chip = mtd_to_nand(mtd);
struct cafe_priv *cafe = nand_get_controller_data(chip);
/* Start off by resetting the NAND controller completely */
cafe_writel(cafe, 1, NAND_RESET);
cafe_writel(cafe, 0, NAND_RESET);
cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
/* Restore timing configuration */
cafe_writel(cafe, timing[0], NAND_TIMING1);
cafe_writel(cafe, timing[1], NAND_TIMING2);
cafe_writel(cafe, timing[2], NAND_TIMING3);
/* Disable master reset, enable NAND clock */
ctrl = cafe_readl(cafe, GLOBAL_CTRL);
ctrl &= 0xffffeff0;
ctrl |= 0x00007000;
cafe_writel(cafe, ctrl | 0x05, GLOBAL_CTRL);
cafe_writel(cafe, ctrl | 0x0a, GLOBAL_CTRL);
cafe_writel(cafe, 0, NAND_DMA_CTRL);
cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
/* Set up DMA address */
cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
if (sizeof(cafe->dmaaddr) > 4)
/* Shift in two parts to shut the compiler up */
cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
else
cafe_writel(cafe, 0, NAND_DMA_ADDR1);
/* Enable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
return 0;
}
static struct pci_driver cafe_nand_pci_driver = {
.name = "CAFÉ NAND",
.id_table = cafe_nand_tbl,
.probe = cafe_nand_probe,
.remove = cafe_nand_remove,
.resume = cafe_nand_resume,
};
module_pci_driver(cafe_nand_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <[email protected]>");
MODULE_DESCRIPTION("NAND flash driver for OLPC CAFÉ chip");
| linux-master | drivers/mtd/nand/raw/cafe_nand.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2002 Thomas Gleixner ([email protected])
*/
#include <linux/sizes.h>
#include "internals.h"
#define LP_OPTIONS 0
#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
#define SP_OPTIONS NAND_NEED_READRDY
#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
/*
* The chip ID list:
* name, device ID, page size, chip size in MiB, eraseblock size, options
*
* If page size and eraseblock size are 0, the sizes are taken from the
* extended chip ID.
*/
struct nand_flash_dev nand_flash_ids[] = {
/*
* Some incompatible NAND chips share device IDs and so must be
* listed by full ID. We list them first so that we can easily identify
* the most specific match.
*/
{"TC58NVG0S3E 1G 3.3V 8-bit",
{ .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} },
SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512), },
{"TC58NVG2S0F 4G 3.3V 8-bit",
{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
{"TC58NVG2S0H 4G 3.3V 8-bit",
{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x16, 0x08, 0x00} },
SZ_4K, SZ_512, SZ_256K, 0, 8, 256, NAND_ECC_INFO(8, SZ_512) },
{"TC58NVG3S0F 8G 3.3V 8-bit",
{ .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
SZ_4K, SZ_1K, SZ_256K, 0, 8, 232, NAND_ECC_INFO(4, SZ_512) },
{"TC58NVG5D2 32G 3.3V 8-bit",
{ .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} },
SZ_8K, SZ_4K, SZ_1M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
{"TC58NVG6D2 64G 3.3V 8-bit",
{ .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
SZ_8K, SZ_8K, SZ_2M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
{"SDTNQGAMA 64G 3.3V 8-bit",
{ .id = {0x45, 0xde, 0x94, 0x93, 0x76, 0x57} },
SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
{"SDTNRGAMA 64G 3.3V 8-bit",
{ .id = {0x45, 0xde, 0x94, 0x93, 0x76, 0x50} },
SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
{"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
{ .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
NAND_ECC_INFO(40, SZ_1K) },
{"H27UCG8T2ETR-BC 64G 3.3V 8-bit",
{ .id = {0xad, 0xde, 0x14, 0xa7, 0x42, 0x4a} },
SZ_16K, SZ_8K, SZ_4M, NAND_NEED_SCRAMBLING, 6, 1664,
NAND_ECC_INFO(40, SZ_1K) },
{"TH58NVG2S3HBAI4 4G 3.3V 8-bit",
{ .id = {0x98, 0xdc, 0x91, 0x15, 0x76} },
SZ_2K, SZ_512, SZ_128K, 0, 5, 128, NAND_ECC_INFO(8, SZ_512) },
{"TH58NVG3S0HBAI4 8G 3.3V 8-bit",
{ .id = {0x98, 0xd3, 0x91, 0x26, 0x76} },
SZ_4K, SZ_1K, SZ_256K, 0, 5, 256, NAND_ECC_INFO(8, SZ_512)},
LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xD6, 8, SZ_8K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xE6, 8, SZ_8K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 16MiB 1,8V 8-bit", 0x33, 16, SZ_16K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 16MiB 3,3V 8-bit", 0x73, 16, SZ_16K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 16MiB 1,8V 16-bit", 0x43, 16, SZ_16K, SP_OPTIONS16),
LEGACY_ID_NAND("NAND 16MiB 3,3V 16-bit", 0x53, 16, SZ_16K, SP_OPTIONS16),
LEGACY_ID_NAND("NAND 32MiB 1,8V 8-bit", 0x35, 32, SZ_16K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit", 0x75, 32, SZ_16K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 32MiB 1,8V 16-bit", 0x45, 32, SZ_16K, SP_OPTIONS16),
LEGACY_ID_NAND("NAND 32MiB 3,3V 16-bit", 0x55, 32, SZ_16K, SP_OPTIONS16),
LEGACY_ID_NAND("NAND 64MiB 1,8V 8-bit", 0x36, 64, SZ_16K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit", 0x76, 64, SZ_16K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 64MiB 1,8V 16-bit", 0x46, 64, SZ_16K, SP_OPTIONS16),
LEGACY_ID_NAND("NAND 64MiB 3,3V 16-bit", 0x56, 64, SZ_16K, SP_OPTIONS16),
LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x78, 128, SZ_16K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x39, 128, SZ_16K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 128MiB 3,3V 8-bit", 0x79, 128, SZ_16K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x72, 128, SZ_16K, SP_OPTIONS16),
LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x49, 128, SZ_16K, SP_OPTIONS16),
LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x74, 128, SZ_16K, SP_OPTIONS16),
LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x59, 128, SZ_16K, SP_OPTIONS16),
LEGACY_ID_NAND("NAND 256MiB 3,3V 8-bit", 0x71, 256, SZ_16K, SP_OPTIONS),
/*
* These are the new chips with large page size. Their page size and
* eraseblock size are determined from the extended ID bytes.
*/
/* 512 Megabit */
EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA2, 64, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA0, 64, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF2, 64, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xD0, 64, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF0, 64, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB2, 64, LP_OPTIONS16),
EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB0, 64, LP_OPTIONS16),
EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC2, 64, LP_OPTIONS16),
EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC0, 64, LP_OPTIONS16),
/* 1 Gigabit */
EXTENDED_ID_NAND("NAND 128MiB 1,8V 8-bit", 0xA1, 128, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xF1, 128, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xD1, 128, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xB1, 128, LP_OPTIONS16),
EXTENDED_ID_NAND("NAND 128MiB 3,3V 16-bit", 0xC1, 128, LP_OPTIONS16),
EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xAD, 128, LP_OPTIONS16),
/* 2 Gigabit */
EXTENDED_ID_NAND("NAND 256MiB 1,8V 8-bit", 0xAA, 256, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 256MiB 3,3V 8-bit", 0xDA, 256, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 256MiB 1,8V 16-bit", 0xBA, 256, LP_OPTIONS16),
EXTENDED_ID_NAND("NAND 256MiB 3,3V 16-bit", 0xCA, 256, LP_OPTIONS16),
/* 4 Gigabit */
EXTENDED_ID_NAND("NAND 512MiB 1,8V 8-bit", 0xAC, 512, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 512MiB 3,3V 8-bit", 0xDC, 512, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 512MiB 1,8V 16-bit", 0xBC, 512, LP_OPTIONS16),
EXTENDED_ID_NAND("NAND 512MiB 3,3V 16-bit", 0xCC, 512, LP_OPTIONS16),
/* 8 Gigabit */
EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit", 0xA3, 1024, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit", 0xD3, 1024, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16),
EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16),
/* 16 Gigabit */
EXTENDED_ID_NAND("NAND 2GiB 1,8V 8-bit", 0xA5, 2048, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 2GiB 3,3V 8-bit", 0xD5, 2048, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 2GiB 1,8V 16-bit", 0xB5, 2048, LP_OPTIONS16),
EXTENDED_ID_NAND("NAND 2GiB 3,3V 16-bit", 0xC5, 2048, LP_OPTIONS16),
/* 32 Gigabit */
EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit", 0xA7, 4096, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit", 0xD7, 4096, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16),
EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16),
/* 64 Gigabit */
EXTENDED_ID_NAND("NAND 8GiB 1,8V 8-bit", 0xAE, 8192, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 8GiB 3,3V 8-bit", 0xDE, 8192, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 8GiB 1,8V 16-bit", 0xBE, 8192, LP_OPTIONS16),
EXTENDED_ID_NAND("NAND 8GiB 3,3V 16-bit", 0xCE, 8192, LP_OPTIONS16),
/* 128 Gigabit */
EXTENDED_ID_NAND("NAND 16GiB 1,8V 8-bit", 0x1A, 16384, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 16GiB 3,3V 8-bit", 0x3A, 16384, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 16GiB 1,8V 16-bit", 0x2A, 16384, LP_OPTIONS16),
EXTENDED_ID_NAND("NAND 16GiB 3,3V 16-bit", 0x4A, 16384, LP_OPTIONS16),
/* 256 Gigabit */
EXTENDED_ID_NAND("NAND 32GiB 1,8V 8-bit", 0x1C, 32768, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 32GiB 3,3V 8-bit", 0x3C, 32768, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 32GiB 1,8V 16-bit", 0x2C, 32768, LP_OPTIONS16),
EXTENDED_ID_NAND("NAND 32GiB 3,3V 16-bit", 0x4C, 32768, LP_OPTIONS16),
/* 512 Gigabit */
EXTENDED_ID_NAND("NAND 64GiB 1,8V 8-bit", 0x1E, 65536, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 64GiB 3,3V 8-bit", 0x3E, 65536, LP_OPTIONS),
EXTENDED_ID_NAND("NAND 64GiB 1,8V 16-bit", 0x2E, 65536, LP_OPTIONS16),
EXTENDED_ID_NAND("NAND 64GiB 3,3V 16-bit", 0x4E, 65536, LP_OPTIONS16),
{NULL}
};
/* Manufacturer IDs */
static const struct nand_manufacturer_desc nand_manufacturer_descs[] = {
{NAND_MFR_AMD, "AMD/Spansion", &amd_nand_manuf_ops},
{NAND_MFR_ATO, "ATO"},
{NAND_MFR_EON, "Eon"},
{NAND_MFR_ESMT, "ESMT", &esmt_nand_manuf_ops},
{NAND_MFR_FUJITSU, "Fujitsu"},
{NAND_MFR_HYNIX, "Hynix", &hynix_nand_manuf_ops},
{NAND_MFR_INTEL, "Intel"},
{NAND_MFR_MACRONIX, "Macronix", &macronix_nand_manuf_ops},
{NAND_MFR_MICRON, "Micron", &micron_nand_manuf_ops},
{NAND_MFR_NATIONAL, "National"},
{NAND_MFR_RENESAS, "Renesas"},
{NAND_MFR_SAMSUNG, "Samsung", &samsung_nand_manuf_ops},
{NAND_MFR_SANDISK, "SanDisk", &sandisk_nand_manuf_ops},
{NAND_MFR_STMICRO, "ST Micro"},
{NAND_MFR_TOSHIBA, "Toshiba", &toshiba_nand_manuf_ops},
{NAND_MFR_WINBOND, "Winbond"},
};
/**
* nand_get_manufacturer_desc - Get manufacturer information from the
* manufacturer ID
* @id: manufacturer ID
*
* Returns a nand_manufacturer_desc object if the manufacturer is defined
* in the NAND manufacturers database, NULL otherwise.
*/
const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id)
{
int i;
for (i = 0; i < ARRAY_SIZE(nand_manufacturer_descs); i++)
if (nand_manufacturer_descs[i].id == id)
return &nand_manufacturer_descs[i];
return NULL;
}
| linux-master | drivers/mtd/nand/raw/nand_ids.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Marvell NAND flash controller driver
*
* Copyright (C) 2017 Marvell
* Author: Miquel RAYNAL <[email protected]>
*
*
* This NAND controller driver handles two versions of the hardware,
* one is called NFCv1 and is available on PXA SoCs and the other is
* called NFCv2 and is available on Armada SoCs.
*
* The main visible difference is that NFCv1 only has Hamming ECC
* capabilities, while NFCv2 also embeds a BCH ECC engine. Also, DMA
* is not used with NFCv2.
*
 * The ECC layouts are depicted in detail in Marvell AN-379, but here
* is a brief description.
*
* When using Hamming, the data is split in 512B chunks (either 1, 2
* or 4) and each chunk will have its own ECC "digest" of 6B at the
 * beginning of the OOB area, followed by the remaining free OOB
 * bytes (also called "spare" bytes in the driver). This engine
 * corrects up to 1 bit per chunk and reliably detects an error if
* there are at most 2 bitflips. Here is the page layout used by the
* controller when Hamming is chosen:
*
* +-------------------------------------------------------------+
* | Data 1 | ... | Data N | ECC 1 | ... | ECCN | Free OOB bytes |
* +-------------------------------------------------------------+
*
* When using the BCH engine, there are N identical (data + free OOB +
* ECC) sections and potentially an extra one to deal with
* configurations where the chosen (data + free OOB + ECC) sizes do
* not align with the page (data + OOB) size. ECC bytes are always
* 30B per ECC chunk. Here is the page layout used by the controller
* when BCH is chosen:
*
* +-----------------------------------------
* | Data 1 | Free OOB bytes 1 | ECC 1 | ...
* +-----------------------------------------
*
* -------------------------------------------
* ... | Data N | Free OOB bytes N | ECC N |
* -------------------------------------------
*
* --------------------------------------------+
* Last Data | Last Free OOB bytes | Last ECC |
* --------------------------------------------+
*
* In both cases, the layout seen by the user is always: all data
* first, then all free OOB bytes and finally all ECC bytes. With BCH,
* ECC bytes are 30B long and are padded with 0xFF to align on 32
* bytes.
*
* The controller has certain limitations that are handled by the
* driver:
* - It can only read 2k at a time. To overcome this limitation, the
* driver issues data cycles on the bus, without issuing new
* CMD + ADDR cycles. The Marvell term is "naked" operations.
 * - The ECC strength in BCH mode cannot be tuned. It is fixed at 16
* bits. What can be tuned is the ECC block size as long as it
* stays between 512B and 2kiB. It's usually chosen based on the
* chip ECC requirements. For instance, using 2kiB ECC chunks
* provides 4b/512B correctability.
* - The controller will always treat data bytes, free OOB bytes
* and ECC bytes in that order, no matter what the real layout is
* (which is usually all data then all OOB bytes). The
* marvell_nfc_layouts array below contains the currently
* supported layouts.
* - Because of these weird layouts, the Bad Block Markers can be
 * located in the data section. In this case, the NAND_BBT_NO_OOB_BBM
* option must be set to prevent scanning/writing bad block
* markers.
*/
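/*
 * Worked example (a sketch based on the marvell_nfc_layouts table below, not
 * normative): with BCH on a 4096B page using 512B ECC chunks at 8-bit
 * strength, the controller view is five sections:
 *
 *   4 x (1024B data + 0B spare + 30B ECC) + 1 x (0B data + 64B spare + 30B ECC)
 *
 * i.e. 4096B of data, 64B of free OOB and 5 x 30B of ECC. The user instead
 * sees 4096B of data, then 64B of free OOB, then the ECC bytes with each
 * chunk's 30B padded with 0xFF to a 32B boundary.
 */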
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <asm/unaligned.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/pxa-dma.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>
/* Data FIFO granularity, FIFO reads/writes must be a multiple of this length */
#define FIFO_DEPTH 8
#define FIFO_REP(x) (x / sizeof(u32))
#define BCH_SEQ_READS (32 / FIFO_DEPTH)
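/*
 * Informative example: FIFO_REP(FIFO_DEPTH) = 8 / sizeof(u32) = 2, so one FIFO
 * slot is drained/filled with two 32-bit accesses, and BCH_SEQ_READS = 32 / 8
 * = 4 slots, i.e. 32 bytes are transferred between two NDSR.RDDREQ checks when
 * BCH is enabled (see marvell_nfc_hw_ecc_bch_read_chunk() below).
 */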
/* NFC does not support transfers of larger chunks at a time */
#define MAX_CHUNK_SIZE 2112
/* NFCv1 cannot read more than 7 bytes of ID */
#define NFCV1_READID_LEN 7
/* Polling is done at a pace of POLL_PERIOD us until POLL_TIMEOUT is reached */
#define POLL_PERIOD 0
#define POLL_TIMEOUT 100000
/* Interrupt maximum wait period in ms */
#define IRQ_TIMEOUT 1000
/* Latency in clock cycles between SoC pins and NFC logic */
#define MIN_RD_DEL_CNT 3
/* Maximum number of contiguous address cycles */
#define MAX_ADDRESS_CYC_NFCV1 5
#define MAX_ADDRESS_CYC_NFCV2 7
/* System control registers/bits to enable the NAND controller on some SoCs */
#define GENCONF_SOC_DEVICE_MUX 0x208
#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
#define GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST BIT(20)
#define GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST BIT(21)
#define GENCONF_SOC_DEVICE_MUX_NFC_INT_EN BIT(25)
#define GENCONF_SOC_DEVICE_MUX_NFC_DEVBUS_ARB_EN BIT(27)
#define GENCONF_CLK_GATING_CTRL 0x220
#define GENCONF_CLK_GATING_CTRL_ND_GATE BIT(2)
#define GENCONF_ND_CLK_CTRL 0x700
#define GENCONF_ND_CLK_CTRL_EN BIT(0)
/* NAND controller data flash control register */
#define NDCR 0x00
#define NDCR_ALL_INT GENMASK(11, 0)
#define NDCR_CS1_CMDDM BIT(7)
#define NDCR_CS0_CMDDM BIT(8)
#define NDCR_RDYM BIT(11)
#define NDCR_ND_ARB_EN BIT(12)
#define NDCR_RA_START BIT(15)
#define NDCR_RD_ID_CNT(x) (min_t(unsigned int, x, 0x7) << 16)
#define NDCR_PAGE_SZ(x) (x >= 2048 ? BIT(24) : 0)
#define NDCR_DWIDTH_M BIT(26)
#define NDCR_DWIDTH_C BIT(27)
#define NDCR_ND_RUN BIT(28)
#define NDCR_DMA_EN BIT(29)
#define NDCR_ECC_EN BIT(30)
#define NDCR_SPARE_EN BIT(31)
#define NDCR_GENERIC_FIELDS_MASK (~(NDCR_RA_START | NDCR_PAGE_SZ(2048) | \
NDCR_DWIDTH_M | NDCR_DWIDTH_C))
/* NAND interface timing parameter 0 register */
#define NDTR0 0x04
#define NDTR0_TRP(x) ((min_t(unsigned int, x, 0xF) & 0x7) << 0)
#define NDTR0_TRH(x) (min_t(unsigned int, x, 0x7) << 3)
#define NDTR0_ETRP(x) ((min_t(unsigned int, x, 0xF) & 0x8) << 3)
#define NDTR0_SEL_NRE_EDGE BIT(7)
#define NDTR0_TWP(x) (min_t(unsigned int, x, 0x7) << 8)
#define NDTR0_TWH(x) (min_t(unsigned int, x, 0x7) << 11)
#define NDTR0_TCS(x) (min_t(unsigned int, x, 0x7) << 16)
#define NDTR0_TCH(x) (min_t(unsigned int, x, 0x7) << 19)
#define NDTR0_RD_CNT_DEL(x) (min_t(unsigned int, x, 0xF) << 22)
#define NDTR0_SELCNTR BIT(26)
#define NDTR0_TADL(x) (min_t(unsigned int, x, 0x1F) << 27)
/* NAND interface timing parameter 1 register */
#define NDTR1 0x0C
#define NDTR1_TAR(x) (min_t(unsigned int, x, 0xF) << 0)
#define NDTR1_TWHR(x) (min_t(unsigned int, x, 0xF) << 4)
#define NDTR1_TRHW(x) (min_t(unsigned int, x / 16, 0x3) << 8)
#define NDTR1_PRESCALE BIT(14)
#define NDTR1_WAIT_MODE BIT(15)
#define NDTR1_TR(x) (min_t(unsigned int, x, 0xFFFF) << 16)
/* NAND controller status register */
#define NDSR 0x14
#define NDSR_WRCMDREQ BIT(0)
#define NDSR_RDDREQ BIT(1)
#define NDSR_WRDREQ BIT(2)
#define NDSR_CORERR BIT(3)
#define NDSR_UNCERR BIT(4)
#define NDSR_CMDD(cs) BIT(8 - cs)
#define NDSR_RDY(rb) BIT(11 + rb)
#define NDSR_ERRCNT(x) ((x >> 16) & 0x1F)
/* NAND ECC control register */
#define NDECCCTRL 0x28
#define NDECCCTRL_BCH_EN BIT(0)
/* NAND controller data buffer register */
#define NDDB 0x40
/* NAND controller command buffer 0 register */
#define NDCB0 0x48
#define NDCB0_CMD1(x) ((x & 0xFF) << 0)
#define NDCB0_CMD2(x) ((x & 0xFF) << 8)
#define NDCB0_ADDR_CYC(x) ((x & 0x7) << 16)
#define NDCB0_ADDR_GET_NUM_CYC(x) (((x) >> 16) & 0x7)
#define NDCB0_DBC BIT(19)
#define NDCB0_CMD_TYPE(x) ((x & 0x7) << 21)
#define NDCB0_CSEL BIT(24)
#define NDCB0_RDY_BYP BIT(27)
#define NDCB0_LEN_OVRD BIT(28)
#define NDCB0_CMD_XTYPE(x) ((x & 0x7) << 29)
/* NAND controller command buffer 1 register */
#define NDCB1 0x4C
#define NDCB1_COLS(x) ((x & 0xFFFF) << 0)
#define NDCB1_ADDRS_PAGE(x) (x << 16)
/* NAND controller command buffer 2 register */
#define NDCB2 0x50
#define NDCB2_ADDR5_PAGE(x) (((x >> 16) & 0xFF) << 0)
#define NDCB2_ADDR5_CYC(x) ((x & 0xFF) << 0)
/* NAND controller command buffer 3 register */
#define NDCB3 0x54
#define NDCB3_ADDR6_CYC(x) ((x & 0xFF) << 16)
#define NDCB3_ADDR7_CYC(x) ((x & 0xFF) << 24)
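/*
 * Address packing example (informative, values are hypothetical): for a large
 * page access at page 0x12345 with the column bytes left at 0, the page
 * helpers below build NDCB1 = NDCB1_COLS(0) | NDCB1_ADDRS_PAGE(0x12345) =
 * 0x23450000 (in the 32-bit register) and NDCB2 = NDCB2_ADDR5_PAGE(0x12345) =
 * 0x01, i.e. the fifth address cycle carries the page bits above bit 15.
 */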
/* NAND controller command buffer 0 register 'type' and 'xtype' fields */
#define TYPE_READ 0
#define TYPE_WRITE 1
#define TYPE_ERASE 2
#define TYPE_READ_ID 3
#define TYPE_STATUS 4
#define TYPE_RESET 5
#define TYPE_NAKED_CMD 6
#define TYPE_NAKED_ADDR 7
#define TYPE_MASK 7
#define XTYPE_MONOLITHIC_RW 0
#define XTYPE_LAST_NAKED_RW 1
#define XTYPE_FINAL_COMMAND 3
#define XTYPE_READ 4
#define XTYPE_WRITE_DISPATCH 4
#define XTYPE_NAKED_RW 5
#define XTYPE_COMMAND_DISPATCH 6
#define XTYPE_MASK 7
/**
* struct marvell_hw_ecc_layout - layout of Marvell ECC
*
 * The Marvell ECC engine works differently than the others: in order to limit
 * the size of the IP, hardware engineers chose to set a fixed strength at 16
 * bits per subpage, and depending on the desired strength needed by the NAND
 * chip, a particular layout mixing data/spare/ecc is defined, with a possible
 * last chunk smaller than the others.
*
* @writesize: Full page size on which the layout applies
* @chunk: Desired ECC chunk size on which the layout applies
* @strength: Desired ECC strength (per chunk size bytes) on which the
* layout applies
* @nchunks: Total number of chunks
* @full_chunk_cnt: Number of full-sized chunks, which is the number of
* repetitions of the pattern:
* (data_bytes + spare_bytes + ecc_bytes).
* @data_bytes: Number of data bytes per chunk
* @spare_bytes: Number of spare bytes per chunk
* @ecc_bytes: Number of ecc bytes per chunk
* @last_data_bytes: Number of data bytes in the last chunk
* @last_spare_bytes: Number of spare bytes in the last chunk
* @last_ecc_bytes: Number of ecc bytes in the last chunk
*/
struct marvell_hw_ecc_layout {
/* Constraints */
int writesize;
int chunk;
int strength;
/* Corresponding layout */
int nchunks;
int full_chunk_cnt;
int data_bytes;
int spare_bytes;
int ecc_bytes;
int last_data_bytes;
int last_spare_bytes;
int last_ecc_bytes;
};
#define MARVELL_LAYOUT(ws, dc, ds, nc, fcc, db, sb, eb, ldb, lsb, leb) \
{ \
.writesize = ws, \
.chunk = dc, \
.strength = ds, \
.nchunks = nc, \
.full_chunk_cnt = fcc, \
.data_bytes = db, \
.spare_bytes = sb, \
.ecc_bytes = eb, \
.last_data_bytes = ldb, \
.last_spare_bytes = lsb, \
.last_ecc_bytes = leb, \
}
/* Layouts explained in AN-379_Marvell_SoC_NFC_ECC */
static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
MARVELL_LAYOUT( 512, 512, 1, 1, 1, 512, 8, 8, 0, 0, 0),
MARVELL_LAYOUT( 2048, 512, 1, 1, 1, 2048, 40, 24, 0, 0, 0),
MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0),
MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,32, 30),
MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,64, 30),
MARVELL_LAYOUT( 2048, 512, 12, 3, 2, 704, 0, 30,640, 0, 30),
MARVELL_LAYOUT( 2048, 512, 16, 5, 4, 512, 0, 30, 0, 32, 30),
MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0),
MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30),
MARVELL_LAYOUT( 4096, 512, 12, 6, 5, 704, 0, 30,576, 32, 30),
MARVELL_LAYOUT( 4096, 512, 16, 9, 8, 512, 0, 30, 0, 32, 30),
MARVELL_LAYOUT( 8192, 512, 4, 4, 4, 2048, 0, 30, 0, 0, 0),
MARVELL_LAYOUT( 8192, 512, 8, 9, 8, 1024, 0, 30, 0, 160, 30),
MARVELL_LAYOUT( 8192, 512, 12, 12, 11, 704, 0, 30,448, 64, 30),
MARVELL_LAYOUT( 8192, 512, 16, 17, 16, 512, 0, 30, 0, 32, 30),
};
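/*
 * Illustrative sketch (marvell_nfc_example_oob_bytes() is hypothetical and not
 * used by the driver): total OOB bytes consumed by a layout. For the
 * 2048B/4-bit entry above this gives 1 * (32 + 30) + 0 + 0 = 62 bytes, which
 * fits in a typical 64B OOB area.
 */
static inline unsigned int
marvell_nfc_example_oob_bytes(const struct marvell_hw_ecc_layout *lt)
{
	return (lt->full_chunk_cnt * (lt->spare_bytes + lt->ecc_bytes)) +
	       lt->last_spare_bytes + lt->last_ecc_bytes;
}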
/**
* struct marvell_nand_chip_sel - CS line description
*
* The Nand Flash Controller has up to 4 CE and 2 RB pins. The CE selection
* is made by a field in NDCB0 register, and in another field in NDCB2 register.
* The datasheet describes the logic with an error: ADDR5 field is once
* declared at the beginning of NDCB2, and another time at its end. Because the
* ADDR5 field of NDCB2 may be used by other bytes, it would be more logical
* to use the last bit of this field instead of the first ones.
*
* @cs: Wanted CE lane.
* @ndcb0_csel: Value of the NDCB0 register with or without the flag
* selecting the wanted CE lane. This is set once when
* the Device Tree is probed.
* @rb: Ready/Busy pin for the flash chip
*/
struct marvell_nand_chip_sel {
unsigned int cs;
u32 ndcb0_csel;
unsigned int rb;
};
/**
* struct marvell_nand_chip - stores NAND chip device related information
*
* @chip: Base NAND chip structure
* @node: Used to store NAND chips into a list
* @layout: NAND layout when using hardware ECC
* @ndcr: Controller register value for this NAND chip
* @ndtr0: Timing registers 0 value for this NAND chip
* @ndtr1: Timing registers 1 value for this NAND chip
* @addr_cyc: Amount of cycles needed to pass column address
* @selected_die: Current active CS
* @nsels: Number of CS lines required by the NAND chip
* @sels: Array of CS lines descriptions
*/
struct marvell_nand_chip {
struct nand_chip chip;
struct list_head node;
const struct marvell_hw_ecc_layout *layout;
u32 ndcr;
u32 ndtr0;
u32 ndtr1;
int addr_cyc;
int selected_die;
unsigned int nsels;
struct marvell_nand_chip_sel sels[];
};
static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
{
return container_of(chip, struct marvell_nand_chip, chip);
}
static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip
*nand)
{
return &nand->sels[nand->selected_die];
}
/**
* struct marvell_nfc_caps - NAND controller capabilities for distinction
* between compatible strings
*
* @max_cs_nb: Number of Chip Select lines available
* @max_rb_nb: Number of Ready/Busy lines available
* @need_system_controller: Indicates if the SoC needs to have access to the
 * system controller (i.e. to enable the NAND controller)
 * @legacy_of_bindings: Indicates if DT parsing must be done using the old
 * fashioned way
 * @is_nfcv2: NFCv2 has numerous enhancements compared to NFCv1, i.e.
* BCH error detection and correction algorithm,
* NDCB3 register has been added
* @use_dma: Use dma for data transfers
* @max_mode_number: Maximum timing mode supported by the controller
*/
struct marvell_nfc_caps {
unsigned int max_cs_nb;
unsigned int max_rb_nb;
bool need_system_controller;
bool legacy_of_bindings;
bool is_nfcv2;
bool use_dma;
unsigned int max_mode_number;
};
/**
* struct marvell_nfc - stores Marvell NAND controller information
*
* @controller: Base controller structure
* @dev: Parent device (used to print error messages)
* @regs: NAND controller registers
* @core_clk: Core clock
* @reg_clk: Registers clock
* @complete: Completion object to wait for NAND controller events
* @assigned_cs: Bitmask describing already assigned CS lines
* @chips: List containing all the NAND chips attached to
* this NAND controller
* @selected_chip: Currently selected target chip
* @caps: NAND controller capabilities for each compatible string
 * @use_dma: Whether DMA is used
* @dma_chan: DMA channel (NFCv1 only)
* @dma_buf: 32-bit aligned buffer for DMA transfers (NFCv1 only)
*/
struct marvell_nfc {
struct nand_controller controller;
struct device *dev;
void __iomem *regs;
struct clk *core_clk;
struct clk *reg_clk;
struct completion complete;
unsigned long assigned_cs;
struct list_head chips;
struct nand_chip *selected_chip;
const struct marvell_nfc_caps *caps;
/* DMA (NFCv1 only) */
bool use_dma;
struct dma_chan *dma_chan;
u8 *dma_buf;
};
static inline struct marvell_nfc *to_marvell_nfc(struct nand_controller *ctrl)
{
return container_of(ctrl, struct marvell_nfc, controller);
}
/**
* struct marvell_nfc_timings - NAND controller timings expressed in NAND
* Controller clock cycles
*
* @tRP: ND_nRE pulse width
* @tRH: ND_nRE high duration
* @tWP: ND_nWE pulse time
* @tWH: ND_nWE high duration
* @tCS: Enable signal setup time
* @tCH: Enable signal hold time
* @tADL: Address to write data delay
* @tAR: ND_ALE low to ND_nRE low delay
* @tWHR: ND_nWE high to ND_nRE low for status read
* @tRHW: ND_nRE high duration, read to write delay
* @tR: ND_nWE high to ND_nRE low for read
*/
struct marvell_nfc_timings {
/* NDTR0 fields */
unsigned int tRP;
unsigned int tRH;
unsigned int tWP;
unsigned int tWH;
unsigned int tCS;
unsigned int tCH;
unsigned int tADL;
/* NDTR1 fields */
unsigned int tAR;
unsigned int tWHR;
unsigned int tRHW;
unsigned int tR;
};
/**
* TO_CYCLES() - Derives a duration in numbers of clock cycles.
*
* @ps: Duration in pico-seconds
* @period_ns: Clock period in nano-seconds
*
* Convert the duration in nano-seconds, then divide by the period and
* return the number of clock periods.
*/
#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP(ps / 1000, period_ns))
#define TO_CYCLES64(ps, period_ns) (DIV_ROUND_UP_ULL(div_u64(ps, 1000), \
period_ns))
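/*
 * Worked example (informative): with a 100MHz core clock (10ns period), a
 * 15000ps constraint gives TO_CYCLES(15000, 10) = DIV_ROUND_UP(15000 / 1000,
 * 10) = DIV_ROUND_UP(15, 10) = 2 clock cycles.
 */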
/**
* struct marvell_nfc_op - filled during the parsing of the ->exec_op()
* subop subset of instructions.
*
* @ndcb: Array of values written to NDCBx registers
* @cle_ale_delay_ns: Optional delay after the last CMD or ADDR cycle
* @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
* @rdy_delay_ns: Optional delay after waiting for the RB pin
* @data_delay_ns: Optional delay after the data xfer
* @data_instr_idx: Index of the data instruction in the subop
* @data_instr: Pointer to the data instruction in the subop
*/
struct marvell_nfc_op {
u32 ndcb[4];
unsigned int cle_ale_delay_ns;
unsigned int rdy_timeout_ms;
unsigned int rdy_delay_ns;
unsigned int data_delay_ns;
unsigned int data_instr_idx;
const struct nand_op_instr *data_instr;
};
/*
 * Internal helper to conditionally apply a delay (from the above structure,
* most of the time).
*/
static void cond_delay(unsigned int ns)
{
if (!ns)
return;
if (ns < 10000)
ndelay(ns);
else
udelay(DIV_ROUND_UP(ns, 1000));
}
/*
* The controller has many flags that could generate interrupts, most of them
* are disabled and polling is used. For the very slow signals, using interrupts
 * may reduce the CPU load.
*/
static void marvell_nfc_disable_int(struct marvell_nfc *nfc, u32 int_mask)
{
u32 reg;
/* Writing 1 disables the interrupt */
reg = readl_relaxed(nfc->regs + NDCR);
writel_relaxed(reg | int_mask, nfc->regs + NDCR);
}
static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
{
u32 reg;
/* Writing 0 enables the interrupt */
reg = readl_relaxed(nfc->regs + NDCR);
writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
}
static u32 marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
{
u32 reg;
reg = readl_relaxed(nfc->regs + NDSR);
writel_relaxed(int_mask, nfc->regs + NDSR);
return reg & int_mask;
}
static void marvell_nfc_force_byte_access(struct nand_chip *chip,
bool force_8bit)
{
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
u32 ndcr;
/*
 * Callers of this function do not verify whether the NAND is using a 16-bit
 * or an 8-bit bus for normal operations, so we need to take care of that
* here by leaving the configuration unchanged if the NAND does not have
* the NAND_BUSWIDTH_16 flag set.
*/
if (!(chip->options & NAND_BUSWIDTH_16))
return;
ndcr = readl_relaxed(nfc->regs + NDCR);
if (force_8bit)
ndcr &= ~(NDCR_DWIDTH_M | NDCR_DWIDTH_C);
else
ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
writel_relaxed(ndcr, nfc->regs + NDCR);
}
static int marvell_nfc_wait_ndrun(struct nand_chip *chip)
{
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
u32 val;
int ret;
/*
* The command is being processed, wait for the ND_RUN bit to be
* cleared by the NFC. If not, we must clear it by hand.
*/
ret = readl_relaxed_poll_timeout(nfc->regs + NDCR, val,
(val & NDCR_ND_RUN) == 0,
POLL_PERIOD, POLL_TIMEOUT);
if (ret) {
dev_err(nfc->dev, "Timeout on NAND controller run mode\n");
writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
nfc->regs + NDCR);
return ret;
}
return 0;
}
/*
* Any time a command has to be sent to the controller, the following sequence
* has to be followed:
* - call marvell_nfc_prepare_cmd()
* -> activate the ND_RUN bit that will kind of 'start a job'
 * -> wait for the signal indicating the NFC is waiting for a command
 * - send the command (cmd and address cycles)
 * - eventually send or receive the data
 * - call marvell_nfc_end_cmd() with the corresponding flag
 * -> wait for the flag to be triggered or cancel the job with a timeout
*
* The following helpers are here to factorize the code a bit so that
* specialized functions responsible for executing the actual NAND
* operations do not have to replicate the same code blocks.
*/
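/*
 * Minimal sketch of the sequence described above (illustrative only; real
 * callers such as marvell_nfc_hw_ecc_hmg_do_read_page() below also handle
 * ECC and the chip layout):
 *
 *	ret = marvell_nfc_prepare_cmd(chip);		// raise ND_RUN, wait for WRCMDREQ
 *	if (ret)
 *		return ret;
 *	marvell_nfc_send_cmd(chip, &nfc_op);		// program NDCB0..NDCB3
 *	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,	// wait for the data request
 *				  "RDDREQ while draining FIFO");
 *	if (ret)
 *		return ret;
 *	marvell_nfc_xfer_data_in_pio(nfc, buf, len);	// drain the data FIFO
 *	ret = marvell_nfc_wait_cmdd(chip);		// wait for command completion
 */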
static int marvell_nfc_prepare_cmd(struct nand_chip *chip)
{
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
u32 ndcr, val;
int ret;
/* Poll ND_RUN and clear NDSR before issuing any command */
ret = marvell_nfc_wait_ndrun(chip);
if (ret) {
dev_err(nfc->dev, "Last operation did not succeed\n");
return ret;
}
ndcr = readl_relaxed(nfc->regs + NDCR);
writel_relaxed(readl(nfc->regs + NDSR), nfc->regs + NDSR);
/* Assert ND_RUN bit and wait the NFC to be ready */
writel_relaxed(ndcr | NDCR_ND_RUN, nfc->regs + NDCR);
ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
val & NDSR_WRCMDREQ,
POLL_PERIOD, POLL_TIMEOUT);
if (ret) {
dev_err(nfc->dev, "Timeout on WRCMDRE\n");
return -ETIMEDOUT;
}
/* Command may be written, clear WRCMDREQ status bit */
writel_relaxed(NDSR_WRCMDREQ, nfc->regs + NDSR);
return 0;
}
static void marvell_nfc_send_cmd(struct nand_chip *chip,
struct marvell_nfc_op *nfc_op)
{
struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
dev_dbg(nfc->dev, "\nNDCR: 0x%08x\n"
"NDCB0: 0x%08x\nNDCB1: 0x%08x\nNDCB2: 0x%08x\nNDCB3: 0x%08x\n",
(u32)readl_relaxed(nfc->regs + NDCR), nfc_op->ndcb[0],
nfc_op->ndcb[1], nfc_op->ndcb[2], nfc_op->ndcb[3]);
writel_relaxed(to_nand_sel(marvell_nand)->ndcb0_csel | nfc_op->ndcb[0],
nfc->regs + NDCB0);
writel_relaxed(nfc_op->ndcb[1], nfc->regs + NDCB0);
writel(nfc_op->ndcb[2], nfc->regs + NDCB0);
/*
* Write NDCB0 four times only if LEN_OVRD is set or if ADDR6 or ADDR7
* fields are used (only available on NFCv2).
*/
if (nfc_op->ndcb[0] & NDCB0_LEN_OVRD ||
NDCB0_ADDR_GET_NUM_CYC(nfc_op->ndcb[0]) >= 6) {
if (!WARN_ON_ONCE(!nfc->caps->is_nfcv2))
writel(nfc_op->ndcb[3], nfc->regs + NDCB0);
}
}
static int marvell_nfc_end_cmd(struct nand_chip *chip, int flag,
const char *label)
{
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
u32 val;
int ret;
ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
val & flag,
POLL_PERIOD, POLL_TIMEOUT);
if (ret) {
dev_err(nfc->dev, "Timeout on %s (NDSR: 0x%08x)\n",
label, val);
if (nfc->dma_chan)
dmaengine_terminate_all(nfc->dma_chan);
return ret;
}
/*
* DMA function uses this helper to poll on CMDD bits without wanting
* them to be cleared.
*/
if (nfc->use_dma && (readl_relaxed(nfc->regs + NDCR) & NDCR_DMA_EN))
return 0;
writel_relaxed(flag, nfc->regs + NDSR);
return 0;
}
static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
{
struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
int cs_flag = NDSR_CMDD(to_nand_sel(marvell_nand)->ndcb0_csel);
return marvell_nfc_end_cmd(chip, cs_flag, "CMDD");
}
static int marvell_nfc_poll_status(struct marvell_nfc *nfc, u32 mask,
u32 expected_val, unsigned long timeout_ms)
{
unsigned long limit;
u32 st;
limit = jiffies + msecs_to_jiffies(timeout_ms);
do {
st = readl_relaxed(nfc->regs + NDSR);
if (st & NDSR_RDY(1))
st |= NDSR_RDY(0);
if ((st & mask) == expected_val)
return 0;
cpu_relax();
} while (time_after(limit, jiffies));
return -ETIMEDOUT;
}
static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
{
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
u32 pending;
int ret;
/* Timeout is expressed in ms */
if (!timeout_ms)
timeout_ms = IRQ_TIMEOUT;
if (mtd->oops_panic_write) {
ret = marvell_nfc_poll_status(nfc, NDSR_RDY(0),
NDSR_RDY(0),
timeout_ms);
} else {
init_completion(&nfc->complete);
marvell_nfc_enable_int(nfc, NDCR_RDYM);
ret = wait_for_completion_timeout(&nfc->complete,
msecs_to_jiffies(timeout_ms));
marvell_nfc_disable_int(nfc, NDCR_RDYM);
}
pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
/*
* In case the interrupt was not served in the required time frame,
* check if the ISR was not served or if something went actually wrong.
*/
if (!ret && !pending) {
dev_err(nfc->dev, "Timeout waiting for RB signal\n");
return -ETIMEDOUT;
}
return 0;
}
static void marvell_nfc_select_target(struct nand_chip *chip,
unsigned int die_nr)
{
struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
u32 ndcr_generic;
/*
* Reset the NDCR register to a clean state for this particular chip,
* also clear ND_RUN bit.
*/
ndcr_generic = readl_relaxed(nfc->regs + NDCR) &
NDCR_GENERIC_FIELDS_MASK & ~NDCR_ND_RUN;
writel_relaxed(ndcr_generic | marvell_nand->ndcr, nfc->regs + NDCR);
/* Also reset the interrupt status register */
marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
return;
writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
nfc->selected_chip = chip;
marvell_nand->selected_die = die_nr;
}
static irqreturn_t marvell_nfc_isr(int irq, void *dev_id)
{
struct marvell_nfc *nfc = dev_id;
u32 st = readl_relaxed(nfc->regs + NDSR);
u32 ien = (~readl_relaxed(nfc->regs + NDCR)) & NDCR_ALL_INT;
/*
* RDY interrupt mask is one bit in NDCR while there are two status
 * bits in NDSR (RDY[cs0/cs2] and RDY[cs1/cs3]).
*/
if (st & NDSR_RDY(1))
st |= NDSR_RDY(0);
if (!(st & ien))
return IRQ_NONE;
marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT);
if (st & (NDSR_RDY(0) | NDSR_RDY(1)))
complete(&nfc->complete);
return IRQ_HANDLED;
}
/* HW ECC related functions */
static void marvell_nfc_enable_hw_ecc(struct nand_chip *chip)
{
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
u32 ndcr = readl_relaxed(nfc->regs + NDCR);
if (!(ndcr & NDCR_ECC_EN)) {
writel_relaxed(ndcr | NDCR_ECC_EN, nfc->regs + NDCR);
/*
* When enabling BCH, set threshold to 0 to always know the
* number of corrected bitflips.
*/
if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
writel_relaxed(NDECCCTRL_BCH_EN, nfc->regs + NDECCCTRL);
}
}
static void marvell_nfc_disable_hw_ecc(struct nand_chip *chip)
{
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
u32 ndcr = readl_relaxed(nfc->regs + NDCR);
if (ndcr & NDCR_ECC_EN) {
writel_relaxed(ndcr & ~NDCR_ECC_EN, nfc->regs + NDCR);
if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
writel_relaxed(0, nfc->regs + NDECCCTRL);
}
}
/* DMA related helpers */
static void marvell_nfc_enable_dma(struct marvell_nfc *nfc)
{
u32 reg;
reg = readl_relaxed(nfc->regs + NDCR);
writel_relaxed(reg | NDCR_DMA_EN, nfc->regs + NDCR);
}
static void marvell_nfc_disable_dma(struct marvell_nfc *nfc)
{
u32 reg;
reg = readl_relaxed(nfc->regs + NDCR);
writel_relaxed(reg & ~NDCR_DMA_EN, nfc->regs + NDCR);
}
/* Read/write PIO/DMA accessors */
static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
enum dma_data_direction direction,
unsigned int len)
{
unsigned int dma_len = min_t(int, ALIGN(len, 32), MAX_CHUNK_SIZE);
struct dma_async_tx_descriptor *tx;
struct scatterlist sg;
dma_cookie_t cookie;
int ret;
marvell_nfc_enable_dma(nfc);
/* Prepare the DMA transfer */
sg_init_one(&sg, nfc->dma_buf, dma_len);
ret = dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
if (!ret) {
dev_err(nfc->dev, "Could not map DMA S/G list\n");
return -ENXIO;
}
tx = dmaengine_prep_slave_sg(nfc->dma_chan, &sg, 1,
direction == DMA_FROM_DEVICE ?
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(nfc->dev, "Could not prepare DMA S/G list\n");
dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
return -ENXIO;
}
/* Do the task and wait for it to finish */
cookie = dmaengine_submit(tx);
ret = dma_submit_error(cookie);
if (ret)
return -EIO;
dma_async_issue_pending(nfc->dma_chan);
ret = marvell_nfc_wait_cmdd(nfc->selected_chip);
dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
marvell_nfc_disable_dma(nfc);
if (ret) {
dev_err(nfc->dev, "Timeout waiting for DMA (status: %d)\n",
dmaengine_tx_status(nfc->dma_chan, cookie, NULL));
dmaengine_terminate_all(nfc->dma_chan);
return -ETIMEDOUT;
}
return 0;
}
static int marvell_nfc_xfer_data_in_pio(struct marvell_nfc *nfc, u8 *in,
unsigned int len)
{
unsigned int last_len = len % FIFO_DEPTH;
unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
int i;
for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
ioread32_rep(nfc->regs + NDDB, in + i, FIFO_REP(FIFO_DEPTH));
if (last_len) {
u8 tmp_buf[FIFO_DEPTH];
ioread32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
memcpy(in + last_full_offset, tmp_buf, last_len);
}
return 0;
}
static int marvell_nfc_xfer_data_out_pio(struct marvell_nfc *nfc, const u8 *out,
unsigned int len)
{
unsigned int last_len = len % FIFO_DEPTH;
unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
int i;
for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
iowrite32_rep(nfc->regs + NDDB, out + i, FIFO_REP(FIFO_DEPTH));
if (last_len) {
u8 tmp_buf[FIFO_DEPTH];
memcpy(tmp_buf, out + last_full_offset, last_len);
iowrite32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
}
return 0;
}
static void marvell_nfc_check_empty_chunk(struct nand_chip *chip,
u8 *data, int data_len,
u8 *spare, int spare_len,
u8 *ecc, int ecc_len,
unsigned int *max_bitflips)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int bf;
/*
* Blank pages (all 0xFF) that have not been written may be recognized
* as bad if bitflips occur, so whenever an uncorrectable error occurs,
* check if the entire page (with ECC bytes) is actually blank or not.
*/
if (!data)
data_len = 0;
if (!spare)
spare_len = 0;
if (!ecc)
ecc_len = 0;
bf = nand_check_erased_ecc_chunk(data, data_len, ecc, ecc_len,
spare, spare_len, chip->ecc.strength);
if (bf < 0) {
mtd->ecc_stats.failed++;
return;
}
/* Update the stats and max_bitflips */
mtd->ecc_stats.corrected += bf;
*max_bitflips = max_t(unsigned int, *max_bitflips, bf);
}
/*
* Check if a chunk is correct or not according to the hardware ECC engine.
 * mtd->ecc_stats.corrected is updated, as well as max_bitflips; however,
 * mtd->ecc_stats.failed is not: the function will instead return a non-zero
 * value indicating that a check on the emptiness of the subpage must be
 * performed before actually declaring the subpage as "corrupted".
*/
static int marvell_nfc_hw_ecc_check_bitflips(struct nand_chip *chip,
unsigned int *max_bitflips)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
int bf = 0;
u32 ndsr;
ndsr = readl_relaxed(nfc->regs + NDSR);
/* Check uncorrectable error flag */
if (ndsr & NDSR_UNCERR) {
writel_relaxed(ndsr, nfc->regs + NDSR);
/*
* Do not increment ->ecc_stats.failed now, instead, return a
* non-zero value to indicate that this chunk was apparently
 * bad, and it should be checked to see if it is empty or not. If
* the chunk (with ECC bytes) is not declared empty, the calling
* function must increment the failure count.
*/
return -EBADMSG;
}
/* Check correctable error flag */
if (ndsr & NDSR_CORERR) {
writel_relaxed(ndsr, nfc->regs + NDSR);
if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
bf = NDSR_ERRCNT(ndsr);
else
bf = 1;
}
/* Update the stats and max_bitflips */
mtd->ecc_stats.corrected += bf;
*max_bitflips = max_t(unsigned int, *max_bitflips, bf);
return 0;
}
/* Hamming read helpers */
static int marvell_nfc_hw_ecc_hmg_do_read_page(struct nand_chip *chip,
u8 *data_buf, u8 *oob_buf,
bool raw, int page)
{
struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
struct marvell_nfc_op nfc_op = {
.ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
NDCB0_DBC |
NDCB0_CMD1(NAND_CMD_READ0) |
NDCB0_CMD2(NAND_CMD_READSTART),
.ndcb[1] = NDCB1_ADDRS_PAGE(page),
.ndcb[2] = NDCB2_ADDR5_PAGE(page),
};
unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
int ret;
/* NFCv2 needs more information about the operation being executed */
if (nfc->caps->is_nfcv2)
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
ret = marvell_nfc_prepare_cmd(chip);
if (ret)
return ret;
marvell_nfc_send_cmd(chip, &nfc_op);
ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
"RDDREQ while draining FIFO (data/oob)");
if (ret)
return ret;
/*
* Read the page then the OOB area. Unlike what is shown in current
* documentation, spare bytes are protected by the ECC engine, and must
* be at the beginning of the OOB area or running this driver on legacy
* systems will prevent the discovery of the BBM/BBT.
*/
if (nfc->use_dma) {
marvell_nfc_xfer_data_dma(nfc, DMA_FROM_DEVICE,
lt->data_bytes + oob_bytes);
memcpy(data_buf, nfc->dma_buf, lt->data_bytes);
memcpy(oob_buf, nfc->dma_buf + lt->data_bytes, oob_bytes);
} else {
marvell_nfc_xfer_data_in_pio(nfc, data_buf, lt->data_bytes);
marvell_nfc_xfer_data_in_pio(nfc, oob_buf, oob_bytes);
}
ret = marvell_nfc_wait_cmdd(chip);
return ret;
}
static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
marvell_nfc_select_target(chip, chip->cur_cs);
return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
true, page);
}
static int marvell_nfc_hw_ecc_hmg_read_page(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
unsigned int full_sz = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
int max_bitflips = 0, ret;
u8 *raw_buf;
marvell_nfc_select_target(chip, chip->cur_cs);
marvell_nfc_enable_hw_ecc(chip);
marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
page);
ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips);
marvell_nfc_disable_hw_ecc(chip);
if (!ret)
return max_bitflips;
/*
* When ECC failures are detected, check if the full page has been
* written or not. Ignore the failure if it is actually empty.
*/
raw_buf = kmalloc(full_sz, GFP_KERNEL);
if (!raw_buf)
return -ENOMEM;
marvell_nfc_hw_ecc_hmg_do_read_page(chip, raw_buf, raw_buf +
lt->data_bytes, true, page);
marvell_nfc_check_empty_chunk(chip, raw_buf, full_sz, NULL, 0, NULL, 0,
&max_bitflips);
kfree(raw_buf);
return max_bitflips;
}
/*
* Spare area in Hamming layouts is not protected by the ECC engine (even if
 * it appears before the ECC bytes when reading), so the ->read_oob_raw() function
* also stands for ->read_oob().
*/
static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct nand_chip *chip, int page)
{
u8 *buf = nand_get_data_buf(chip);
marvell_nfc_select_target(chip, chip->cur_cs);
return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
true, page);
}
/* Hamming write helpers */
static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
const u8 *data_buf,
const u8 *oob_buf, bool raw,
int page)
{
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(nand_get_interface_config(chip));
struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
struct marvell_nfc_op nfc_op = {
.ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) |
NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
NDCB0_CMD1(NAND_CMD_SEQIN) |
NDCB0_CMD2(NAND_CMD_PAGEPROG) |
NDCB0_DBC,
.ndcb[1] = NDCB1_ADDRS_PAGE(page),
.ndcb[2] = NDCB2_ADDR5_PAGE(page),
};
unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
int ret;
/* NFCv2 needs more information about the operation being executed */
if (nfc->caps->is_nfcv2)
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
ret = marvell_nfc_prepare_cmd(chip);
if (ret)
return ret;
marvell_nfc_send_cmd(chip, &nfc_op);
ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
"WRDREQ while loading FIFO (data)");
if (ret)
return ret;
/* Write the page then the OOB area */
if (nfc->use_dma) {
memcpy(nfc->dma_buf, data_buf, lt->data_bytes);
memcpy(nfc->dma_buf + lt->data_bytes, oob_buf, oob_bytes);
marvell_nfc_xfer_data_dma(nfc, DMA_TO_DEVICE, lt->data_bytes +
lt->ecc_bytes + lt->spare_bytes);
} else {
marvell_nfc_xfer_data_out_pio(nfc, data_buf, lt->data_bytes);
marvell_nfc_xfer_data_out_pio(nfc, oob_buf, oob_bytes);
}
ret = marvell_nfc_wait_cmdd(chip);
if (ret)
return ret;
ret = marvell_nfc_wait_op(chip,
PSEC_TO_MSEC(sdr->tPROG_max));
return ret;
}
static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct nand_chip *chip,
const u8 *buf,
int oob_required, int page)
{
marvell_nfc_select_target(chip, chip->cur_cs);
return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
true, page);
}
static int marvell_nfc_hw_ecc_hmg_write_page(struct nand_chip *chip,
const u8 *buf,
int oob_required, int page)
{
int ret;
marvell_nfc_select_target(chip, chip->cur_cs);
marvell_nfc_enable_hw_ecc(chip);
ret = marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
false, page);
marvell_nfc_disable_hw_ecc(chip);
return ret;
}
/*
* Spare area in Hamming layouts is not protected by the ECC engine (even if
 * it appears before the ECC bytes when reading), so the ->write_oob_raw() function
* also stands for ->write_oob().
*/
static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct nand_chip *chip,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
u8 *buf = nand_get_data_buf(chip);
memset(buf, 0xFF, mtd->writesize);
marvell_nfc_select_target(chip, chip->cur_cs);
return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
true, page);
}
/* BCH read helpers */
static int marvell_nfc_hw_ecc_bch_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
u8 *oob = chip->oob_poi;
int chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
lt->last_spare_bytes;
int data_len = lt->data_bytes;
int spare_len = lt->spare_bytes;
int ecc_len = lt->ecc_bytes;
int chunk;
marvell_nfc_select_target(chip, chip->cur_cs);
if (oob_required)
memset(chip->oob_poi, 0xFF, mtd->oobsize);
nand_read_page_op(chip, page, 0, NULL, 0);
for (chunk = 0; chunk < lt->nchunks; chunk++) {
/* Update last chunk length */
if (chunk >= lt->full_chunk_cnt) {
data_len = lt->last_data_bytes;
spare_len = lt->last_spare_bytes;
ecc_len = lt->last_ecc_bytes;
}
/* Read data bytes */
nand_change_read_column_op(chip, chunk * chunk_size,
buf + (lt->data_bytes * chunk),
data_len, false);
/* Read spare bytes */
nand_read_data_op(chip, oob + (lt->spare_bytes * chunk),
spare_len, false, false);
/* Read ECC bytes */
nand_read_data_op(chip, oob + ecc_offset +
(ALIGN(lt->ecc_bytes, 32) * chunk),
ecc_len, false, false);
}
return 0;
}
static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
u8 *data, unsigned int data_len,
u8 *spare, unsigned int spare_len,
int page)
{
struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
int i, ret;
struct marvell_nfc_op nfc_op = {
.ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
NDCB0_LEN_OVRD,
.ndcb[1] = NDCB1_ADDRS_PAGE(page),
.ndcb[2] = NDCB2_ADDR5_PAGE(page),
.ndcb[3] = data_len + spare_len,
};
ret = marvell_nfc_prepare_cmd(chip);
if (ret)
return;
if (chunk == 0)
nfc_op.ndcb[0] |= NDCB0_DBC |
NDCB0_CMD1(NAND_CMD_READ0) |
NDCB0_CMD2(NAND_CMD_READSTART);
/*
* Trigger the monolithic read on the first chunk, then naked read on
* intermediate chunks and finally a last naked read on the last chunk.
*/
if (chunk == 0)
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
else if (chunk < lt->nchunks - 1)
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
else
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
marvell_nfc_send_cmd(chip, &nfc_op);
/*
* According to the datasheet, when reading from NDDB
 * with BCH enabled, after each 32-byte read, we
* have to make sure that the NDSR.RDDREQ bit is set.
*
* Drain the FIFO, 8 32-bit reads at a time, and skip
* the polling on the last read.
*
* Length is a multiple of 32 bytes, hence it is a multiple of 8 too.
*/
for (i = 0; i < data_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
"RDDREQ while draining FIFO (data)");
marvell_nfc_xfer_data_in_pio(nfc, data,
FIFO_DEPTH * BCH_SEQ_READS);
data += FIFO_DEPTH * BCH_SEQ_READS;
}
for (i = 0; i < spare_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
"RDDREQ while draining FIFO (OOB)");
marvell_nfc_xfer_data_in_pio(nfc, spare,
FIFO_DEPTH * BCH_SEQ_READS);
spare += FIFO_DEPTH * BCH_SEQ_READS;
}
}
static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip,
u8 *buf, int oob_required,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
int data_len = lt->data_bytes, spare_len = lt->spare_bytes;
u8 *data = buf, *spare = chip->oob_poi;
int max_bitflips = 0;
u32 failure_mask = 0;
int chunk, ret;
marvell_nfc_select_target(chip, chip->cur_cs);
/*
 * With BCH, OOB is not fully used (and thus not read entirely); unexpected
 * bytes could show up at the end of the OOB buffer if it is not
 * explicitly erased.
*/
if (oob_required)
memset(chip->oob_poi, 0xFF, mtd->oobsize);
marvell_nfc_enable_hw_ecc(chip);
for (chunk = 0; chunk < lt->nchunks; chunk++) {
/* Update length for the last chunk */
if (chunk >= lt->full_chunk_cnt) {
data_len = lt->last_data_bytes;
spare_len = lt->last_spare_bytes;
}
/* Read the chunk and detect number of bitflips */
marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
spare, spare_len, page);
ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips);
if (ret)
failure_mask |= BIT(chunk);
data += data_len;
spare += spare_len;
}
marvell_nfc_disable_hw_ecc(chip);
if (!failure_mask)
return max_bitflips;
/*
* Please note that dumping the ECC bytes during a normal read with OOB
* area would add a significant overhead as ECC bytes are "consumed" by
* the controller in normal mode and must be re-read in raw mode. To
 * avoid degrading performance, we prefer not to include them. The
* user should re-read the page in raw mode if ECC bytes are required.
*/
/*
* In case there is any subpage read error, we usually re-read only ECC
* bytes in raw mode and check if the whole page is empty. In this case,
* it is normal that the ECC check failed and we just ignore the error.
*
 * However, it has been empirically observed that for some layouts (e.g.
 * 2k page, 8-bit strength per 512B chunk), the controller tries to correct
 * bits and may itself create bitflips in the erased area. To overcome
* this strange behavior, the whole page is re-read in raw mode, not
* only the ECC bytes.
*/
for (chunk = 0; chunk < lt->nchunks; chunk++) {
int data_off_in_page, spare_off_in_page, ecc_off_in_page;
int data_off, spare_off, ecc_off;
int data_len, spare_len, ecc_len;
/* No failure reported for this chunk, move to the next one */
if (!(failure_mask & BIT(chunk)))
continue;
data_off_in_page = chunk * (lt->data_bytes + lt->spare_bytes +
lt->ecc_bytes);
spare_off_in_page = data_off_in_page +
(chunk < lt->full_chunk_cnt ? lt->data_bytes :
lt->last_data_bytes);
ecc_off_in_page = spare_off_in_page +
(chunk < lt->full_chunk_cnt ? lt->spare_bytes :
lt->last_spare_bytes);
data_off = chunk * lt->data_bytes;
spare_off = chunk * lt->spare_bytes;
ecc_off = (lt->full_chunk_cnt * lt->spare_bytes) +
lt->last_spare_bytes +
(chunk * (lt->ecc_bytes + 2));
data_len = chunk < lt->full_chunk_cnt ? lt->data_bytes :
lt->last_data_bytes;
spare_len = chunk < lt->full_chunk_cnt ? lt->spare_bytes :
lt->last_spare_bytes;
ecc_len = chunk < lt->full_chunk_cnt ? lt->ecc_bytes :
lt->last_ecc_bytes;
/*
* Only re-read the ECC bytes, unless we are using the 2k/8b
* layout which is buggy in the sense that the ECC engine will
* try to correct data bytes anyway, creating bitflips. In this
* case, re-read the entire page.
*/
if (lt->writesize == 2048 && lt->strength == 8) {
nand_change_read_column_op(chip, data_off_in_page,
buf + data_off, data_len,
false);
nand_change_read_column_op(chip, spare_off_in_page,
chip->oob_poi + spare_off, spare_len,
false);
}
nand_change_read_column_op(chip, ecc_off_in_page,
chip->oob_poi + ecc_off, ecc_len,
false);
/* Check the entire chunk (data + spare + ecc) for emptiness */
marvell_nfc_check_empty_chunk(chip, buf + data_off, data_len,
chip->oob_poi + spare_off, spare_len,
chip->oob_poi + ecc_off, ecc_len,
&max_bitflips);
}
return max_bitflips;
}
static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct nand_chip *chip, int page)
{
u8 *buf = nand_get_data_buf(chip);
return chip->ecc.read_page_raw(chip, buf, true, page);
}
static int marvell_nfc_hw_ecc_bch_read_oob(struct nand_chip *chip, int page)
{
u8 *buf = nand_get_data_buf(chip);
return chip->ecc.read_page(chip, buf, true, page);
}
/* BCH write helpers */
static int marvell_nfc_hw_ecc_bch_write_page_raw(struct nand_chip *chip,
const u8 *buf,
int oob_required, int page)
{
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
int full_chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
int data_len = lt->data_bytes;
int spare_len = lt->spare_bytes;
int ecc_len = lt->ecc_bytes;
int spare_offset = 0;
int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
lt->last_spare_bytes;
int chunk;
marvell_nfc_select_target(chip, chip->cur_cs);
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
for (chunk = 0; chunk < lt->nchunks; chunk++) {
if (chunk >= lt->full_chunk_cnt) {
data_len = lt->last_data_bytes;
spare_len = lt->last_spare_bytes;
ecc_len = lt->last_ecc_bytes;
}
/* Point to the column of the next chunk */
nand_change_write_column_op(chip, chunk * full_chunk_size,
NULL, 0, false);
/* Write the data */
nand_write_data_op(chip, buf + (chunk * lt->data_bytes),
data_len, false);
if (!oob_required)
continue;
/* Write the spare bytes */
if (spare_len)
nand_write_data_op(chip, chip->oob_poi + spare_offset,
spare_len, false);
/* Write the ECC bytes */
if (ecc_len)
nand_write_data_op(chip, chip->oob_poi + ecc_offset,
ecc_len, false);
spare_offset += spare_len;
ecc_offset += ALIGN(ecc_len, 32);
}
return nand_prog_page_end_op(chip);
}
static int
marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
const u8 *data, unsigned int data_len,
const u8 *spare, unsigned int spare_len,
int page)
{
struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
u32 xtype;
int ret;
struct marvell_nfc_op nfc_op = {
.ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
.ndcb[3] = data_len + spare_len,
};
/*
 * First operation dispatches the CMD_SEQIN command, issues the address
* cycles and asks for the first chunk of data.
* All operations in the middle (if any) will issue a naked write and
* also ask for data.
* Last operation (if any) asks for the last chunk of data through a
* last naked write.
*/
if (chunk == 0) {
if (lt->nchunks == 1)
xtype = XTYPE_MONOLITHIC_RW;
else
xtype = XTYPE_WRITE_DISPATCH;
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(xtype) |
NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
NDCB0_CMD1(NAND_CMD_SEQIN);
nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
nfc_op.ndcb[2] |= NDCB2_ADDR5_PAGE(page);
} else if (chunk < lt->nchunks - 1) {
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
} else {
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
}
/* Always dispatch the PAGEPROG command on the last chunk */
if (chunk == lt->nchunks - 1)
nfc_op.ndcb[0] |= NDCB0_CMD2(NAND_CMD_PAGEPROG) | NDCB0_DBC;
ret = marvell_nfc_prepare_cmd(chip);
if (ret)
return ret;
marvell_nfc_send_cmd(chip, &nfc_op);
ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
"WRDREQ while loading FIFO (data)");
if (ret)
return ret;
/* Transfer the contents */
iowrite32_rep(nfc->regs + NDDB, data, FIFO_REP(data_len));
iowrite32_rep(nfc->regs + NDDB, spare, FIFO_REP(spare_len));
return 0;
}
static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
const u8 *buf,
int oob_required, int page)
{
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(nand_get_interface_config(chip));
struct mtd_info *mtd = nand_to_mtd(chip);
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
const u8 *data = buf;
const u8 *spare = chip->oob_poi;
int data_len = lt->data_bytes;
int spare_len = lt->spare_bytes;
int chunk, ret;
marvell_nfc_select_target(chip, chip->cur_cs);
/* Spare data will be written anyway, so clear it to avoid garbage */
if (!oob_required)
memset(chip->oob_poi, 0xFF, mtd->oobsize);
marvell_nfc_enable_hw_ecc(chip);
for (chunk = 0; chunk < lt->nchunks; chunk++) {
if (chunk >= lt->full_chunk_cnt) {
data_len = lt->last_data_bytes;
spare_len = lt->last_spare_bytes;
}
marvell_nfc_hw_ecc_bch_write_chunk(chip, chunk, data, data_len,
spare, spare_len, page);
data += data_len;
spare += spare_len;
/*
 * Waiting only for CMDD or PAGED is not enough, the ECC bytes would
 * only be partially written. No flag is set once the operation is
* really finished but the ND_RUN bit is cleared, so wait for it
* before stepping into the next command.
*/
marvell_nfc_wait_ndrun(chip);
}
ret = marvell_nfc_wait_op(chip, PSEC_TO_MSEC(sdr->tPROG_max));
marvell_nfc_disable_hw_ecc(chip);
if (ret)
return ret;
return 0;
}
static int marvell_nfc_hw_ecc_bch_write_oob_raw(struct nand_chip *chip,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
u8 *buf = nand_get_data_buf(chip);
memset(buf, 0xFF, mtd->writesize);
return chip->ecc.write_page_raw(chip, buf, true, page);
}
static int marvell_nfc_hw_ecc_bch_write_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
u8 *buf = nand_get_data_buf(chip);
memset(buf, 0xFF, mtd->writesize);
return chip->ecc.write_page(chip, buf, true, page);
}
/* NAND framework ->exec_op() hooks and related helpers */
static void marvell_nfc_parse_instructions(struct nand_chip *chip,
const struct nand_subop *subop,
struct marvell_nfc_op *nfc_op)
{
const struct nand_op_instr *instr = NULL;
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
bool first_cmd = true;
unsigned int op_id;
int i;
/* Reset the input structure as most of its fields will be OR'ed */
memset(nfc_op, 0, sizeof(struct marvell_nfc_op));
for (op_id = 0; op_id < subop->ninstrs; op_id++) {
unsigned int offset, naddrs;
const u8 *addrs;
int len;
instr = &subop->instrs[op_id];
switch (instr->type) {
case NAND_OP_CMD_INSTR:
if (first_cmd)
nfc_op->ndcb[0] |=
NDCB0_CMD1(instr->ctx.cmd.opcode);
else
nfc_op->ndcb[0] |=
NDCB0_CMD2(instr->ctx.cmd.opcode) |
NDCB0_DBC;
nfc_op->cle_ale_delay_ns = instr->delay_ns;
first_cmd = false;
break;
case NAND_OP_ADDR_INSTR:
offset = nand_subop_get_addr_start_off(subop, op_id);
naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
addrs = &instr->ctx.addr.addrs[offset];
nfc_op->ndcb[0] |= NDCB0_ADDR_CYC(naddrs);
for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
nfc_op->ndcb[1] |= addrs[i] << (8 * i);
if (naddrs >= 5)
nfc_op->ndcb[2] |= NDCB2_ADDR5_CYC(addrs[4]);
if (naddrs >= 6)
nfc_op->ndcb[3] |= NDCB3_ADDR6_CYC(addrs[5]);
if (naddrs == 7)
nfc_op->ndcb[3] |= NDCB3_ADDR7_CYC(addrs[6]);
nfc_op->cle_ale_delay_ns = instr->delay_ns;
break;
case NAND_OP_DATA_IN_INSTR:
nfc_op->data_instr = instr;
nfc_op->data_instr_idx = op_id;
nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ);
if (nfc->caps->is_nfcv2) {
nfc_op->ndcb[0] |=
NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
NDCB0_LEN_OVRD;
len = nand_subop_get_data_len(subop, op_id);
nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
}
nfc_op->data_delay_ns = instr->delay_ns;
break;
case NAND_OP_DATA_OUT_INSTR:
nfc_op->data_instr = instr;
nfc_op->data_instr_idx = op_id;
nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE);
if (nfc->caps->is_nfcv2) {
nfc_op->ndcb[0] |=
NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
NDCB0_LEN_OVRD;
len = nand_subop_get_data_len(subop, op_id);
nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
}
nfc_op->data_delay_ns = instr->delay_ns;
break;
case NAND_OP_WAITRDY_INSTR:
nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
nfc_op->rdy_delay_ns = instr->delay_ns;
break;
}
}
}
static int marvell_nfc_xfer_data_pio(struct nand_chip *chip,
const struct nand_subop *subop,
struct marvell_nfc_op *nfc_op)
{
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
const struct nand_op_instr *instr = nfc_op->data_instr;
unsigned int op_id = nfc_op->data_instr_idx;
unsigned int len = nand_subop_get_data_len(subop, op_id);
unsigned int offset = nand_subop_get_data_start_off(subop, op_id);
bool reading = (instr->type == NAND_OP_DATA_IN_INSTR);
int ret;
if (instr->ctx.data.force_8bit)
marvell_nfc_force_byte_access(chip, true);
if (reading) {
u8 *in = instr->ctx.data.buf.in + offset;
ret = marvell_nfc_xfer_data_in_pio(nfc, in, len);
} else {
const u8 *out = instr->ctx.data.buf.out + offset;
ret = marvell_nfc_xfer_data_out_pio(nfc, out, len);
}
if (instr->ctx.data.force_8bit)
marvell_nfc_force_byte_access(chip, false);
return ret;
}
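/*
 * Execute a whole command/address/data sequence as a single monolithic
 * controller operation (the monolithic read/write patterns of the NFCv2
 * parser end up here).
 */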
static int marvell_nfc_monolithic_access_exec(struct nand_chip *chip,
const struct nand_subop *subop)
{
struct marvell_nfc_op nfc_op;
bool reading;
int ret;
marvell_nfc_parse_instructions(chip, subop, &nfc_op);
reading = (nfc_op.data_instr->type == NAND_OP_DATA_IN_INSTR);
ret = marvell_nfc_prepare_cmd(chip);
if (ret)
return ret;
marvell_nfc_send_cmd(chip, &nfc_op);
ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
"RDDREQ/WRDREQ while draining raw data");
if (ret)
return ret;
cond_delay(nfc_op.cle_ale_delay_ns);
if (reading) {
if (nfc_op.rdy_timeout_ms) {
ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
if (ret)
return ret;
}
cond_delay(nfc_op.rdy_delay_ns);
}
marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
ret = marvell_nfc_wait_cmdd(chip);
if (ret)
return ret;
cond_delay(nfc_op.data_delay_ns);
if (!reading) {
if (nfc_op.rdy_timeout_ms) {
ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
if (ret)
return ret;
}
cond_delay(nfc_op.rdy_delay_ns);
}
/*
* NDCR ND_RUN bit should be cleared automatically at the end of each
* operation but experience shows that the behavior is buggy when it
* comes to writes (with LEN_OVRD). Clear it by hand in this case.
*/
if (!reading) {
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
nfc->regs + NDCR);
}
return 0;
}
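/*
 * Execute a single naked command, address or data cycle. Naked accesses are
 * only used with the NFCv2 instruction parser.
 */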
static int marvell_nfc_naked_access_exec(struct nand_chip *chip,
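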
const struct nand_subop *subop)
{
struct marvell_nfc_op nfc_op;
int ret;
marvell_nfc_parse_instructions(chip, subop, &nfc_op);
/*
* Naked accesses are different in that they need to be flagged as naked
* by the controller. Reset the controller register fields that indicate
* the access type and refill them according to the ongoing operation.
*/
nfc_op.ndcb[0] &= ~(NDCB0_CMD_TYPE(TYPE_MASK) |
NDCB0_CMD_XTYPE(XTYPE_MASK));
switch (subop->instrs[0].type) {
case NAND_OP_CMD_INSTR:
nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_CMD);
break;
case NAND_OP_ADDR_INSTR:
nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_ADDR);
break;
case NAND_OP_DATA_IN_INSTR:
nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ) |
NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
break;
case NAND_OP_DATA_OUT_INSTR:
nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE) |
NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
break;
default:
/* This should never happen */
break;
}
ret = marvell_nfc_prepare_cmd(chip);
if (ret)
return ret;
marvell_nfc_send_cmd(chip, &nfc_op);
if (!nfc_op.data_instr) {
ret = marvell_nfc_wait_cmdd(chip);
cond_delay(nfc_op.cle_ale_delay_ns);
return ret;
}
ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
"RDDREQ/WRDREQ while draining raw data");
if (ret)
return ret;
marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
ret = marvell_nfc_wait_cmdd(chip);
if (ret)
return ret;
/*
* NDCR ND_RUN bit should be cleared automatically at the end of each
* operation but experience shows that the behavior is buggy when it
* comes to writes (with LEN_OVRD). Clear it by hand in this case.
*/
if (subop->instrs[0].type == NAND_OP_DATA_OUT_INSTR) {
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
nfc->regs + NDCR);
}
return 0;
}
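/* Handle a standalone WAITRDY instruction: just wait for the RB line. */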
static int marvell_nfc_naked_waitrdy_exec(struct nand_chip *chip,
const struct nand_subop *subop)
{
struct marvell_nfc_op nfc_op;
int ret;
marvell_nfc_parse_instructions(chip, subop, &nfc_op);
ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
cond_delay(nfc_op.rdy_delay_ns);
return ret;
}
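/* Used by the NFCv1 parser: issue READ ID with the dedicated command type. */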
static int marvell_nfc_read_id_type_exec(struct nand_chip *chip,
const struct nand_subop *subop)
{
struct marvell_nfc_op nfc_op;
int ret;
marvell_nfc_parse_instructions(chip, subop, &nfc_op);
nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ_ID);
ret = marvell_nfc_prepare_cmd(chip);
if (ret)
return ret;
marvell_nfc_send_cmd(chip, &nfc_op);
ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
"RDDREQ while reading ID");
if (ret)
return ret;
cond_delay(nfc_op.cle_ale_delay_ns);
if (nfc_op.rdy_timeout_ms) {
ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
if (ret)
return ret;
}
cond_delay(nfc_op.rdy_delay_ns);
marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
ret = marvell_nfc_wait_cmdd(chip);
if (ret)
return ret;
cond_delay(nfc_op.data_delay_ns);
return 0;
}
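/* Used by the NFCv1 parser: issue READ STATUS with the dedicated command type. */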
static int marvell_nfc_read_status_exec(struct nand_chip *chip,
const struct nand_subop *subop)
{
struct marvell_nfc_op nfc_op;
int ret;
marvell_nfc_parse_instructions(chip, subop, &nfc_op);
nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_STATUS);
ret = marvell_nfc_prepare_cmd(chip);
if (ret)
return ret;
marvell_nfc_send_cmd(chip, &nfc_op);
ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
"RDDREQ while reading status");
if (ret)
return ret;
cond_delay(nfc_op.cle_ale_delay_ns);
if (nfc_op.rdy_timeout_ms) {
ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
if (ret)
return ret;
}
cond_delay(nfc_op.rdy_delay_ns);
marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
ret = marvell_nfc_wait_cmdd(chip);
if (ret)
return ret;
cond_delay(nfc_op.data_delay_ns);
return 0;
}
static int marvell_nfc_reset_cmd_type_exec(struct nand_chip *chip,
const struct nand_subop *subop)
{
struct marvell_nfc_op nfc_op;
int ret;
marvell_nfc_parse_instructions(chip, subop, &nfc_op);
nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_RESET);
ret = marvell_nfc_prepare_cmd(chip);
if (ret)
return ret;
marvell_nfc_send_cmd(chip, &nfc_op);
ret = marvell_nfc_wait_cmdd(chip);
if (ret)
return ret;
cond_delay(nfc_op.cle_ale_delay_ns);
ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
if (ret)
return ret;
cond_delay(nfc_op.rdy_delay_ns);
return 0;
}
static int marvell_nfc_erase_cmd_type_exec(struct nand_chip *chip,
const struct nand_subop *subop)
{
struct marvell_nfc_op nfc_op;
int ret;
marvell_nfc_parse_instructions(chip, subop, &nfc_op);
nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_ERASE);
ret = marvell_nfc_prepare_cmd(chip);
if (ret)
return ret;
marvell_nfc_send_cmd(chip, &nfc_op);
ret = marvell_nfc_wait_cmdd(chip);
if (ret)
return ret;
cond_delay(nfc_op.cle_ale_delay_ns);
ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
if (ret)
return ret;
cond_delay(nfc_op.rdy_delay_ns);
return 0;
}
static const struct nand_op_parser marvell_nfcv2_op_parser = NAND_OP_PARSER(
/* Monolithic reads/writes */
NAND_OP_PARSER_PATTERN(
marvell_nfc_monolithic_access_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC_NFCV2),
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
NAND_OP_PARSER_PATTERN(
marvell_nfc_monolithic_access_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2),
NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE),
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
/* Naked commands */
NAND_OP_PARSER_PATTERN(
marvell_nfc_naked_access_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false)),
NAND_OP_PARSER_PATTERN(
marvell_nfc_naked_access_exec,
NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2)),
NAND_OP_PARSER_PATTERN(
marvell_nfc_naked_access_exec,
NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
NAND_OP_PARSER_PATTERN(
marvell_nfc_naked_access_exec,
NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE)),
NAND_OP_PARSER_PATTERN(
marvell_nfc_naked_waitrdy_exec,
NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
);
static const struct nand_op_parser marvell_nfcv1_op_parser = NAND_OP_PARSER(
/* Naked commands not supported, use a function for each pattern */
NAND_OP_PARSER_PATTERN(
marvell_nfc_read_id_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
NAND_OP_PARSER_PATTERN(
marvell_nfc_erase_cmd_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
NAND_OP_PARSER_PATTERN(
marvell_nfc_read_status_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
NAND_OP_PARSER_PATTERN(
marvell_nfc_reset_cmd_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
NAND_OP_PARSER_PATTERN(
marvell_nfc_naked_waitrdy_exec,
NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
);
static int marvell_nfc_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
if (!check_only)
marvell_nfc_select_target(chip, op->cs);
if (nfc->caps->is_nfcv2)
return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser,
op, check_only);
else
return nand_op_parser_exec_op(chip, &marvell_nfcv1_op_parser,
op, check_only);
}
/*
* Layouts were broken in the old pxa3xx_nand driver; these are supposed
* to be usable.
*/
static int marvell_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
if (section)
return -ERANGE;
oobregion->length = (lt->full_chunk_cnt * lt->ecc_bytes) +
lt->last_ecc_bytes;
oobregion->offset = mtd->oobsize - oobregion->length;
return 0;
}
static int marvell_nand_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
if (section)
return -ERANGE;
/*
* Bootrom looks in bytes 0 & 5 for bad blocks for the
* 4KB page / 4bit BCH combination.
*/
if (mtd->writesize == SZ_4K && lt->data_bytes == SZ_2K)
oobregion->offset = 6;
else
oobregion->offset = 2;
oobregion->length = (lt->full_chunk_cnt * lt->spare_bytes) +
lt->last_spare_bytes - oobregion->offset;
return 0;
}
static const struct mtd_ooblayout_ops marvell_nand_ooblayout_ops = {
.ecc = marvell_nand_ooblayout_ecc,
.free = marvell_nand_ooblayout_free,
};
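/*
 * Pick the hardware ECC layout matching the page size, ECC step size and
 * strength, then wire either the Hamming (strength 1) or the BCH page/OOB
 * accessors accordingly.
 */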
static int marvell_nand_hw_ecc_controller_init(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
const struct marvell_hw_ecc_layout *l;
int i;
if (!nfc->caps->is_nfcv2 &&
(mtd->writesize + mtd->oobsize > MAX_CHUNK_SIZE)) {
dev_err(nfc->dev,
"NFCv1: writesize (%d) cannot be bigger than a chunk (%d)\n",
mtd->writesize, MAX_CHUNK_SIZE - mtd->oobsize);
return -ENOTSUPP;
}
to_marvell_nand(chip)->layout = NULL;
for (i = 0; i < ARRAY_SIZE(marvell_nfc_layouts); i++) {
l = &marvell_nfc_layouts[i];
if (mtd->writesize == l->writesize &&
ecc->size == l->chunk && ecc->strength == l->strength) {
to_marvell_nand(chip)->layout = l;
break;
}
}
if (!to_marvell_nand(chip)->layout ||
(!nfc->caps->is_nfcv2 && ecc->strength > 1)) {
dev_err(nfc->dev,
"ECC strength %d at page size %d is not supported\n",
ecc->strength, mtd->writesize);
return -ENOTSUPP;
}
/* Special care for the layout 2k/8-bit/512B */
if (l->writesize == 2048 && l->strength == 8) {
if (mtd->oobsize < 128) {
dev_err(nfc->dev, "Requested layout needs at least 128 OOB bytes\n");
return -ENOTSUPP;
} else {
chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
}
}
mtd_set_ooblayout(mtd, &marvell_nand_ooblayout_ops);
ecc->steps = l->nchunks;
ecc->size = l->data_bytes;
if (ecc->strength == 1) {
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
ecc->read_page_raw = marvell_nfc_hw_ecc_hmg_read_page_raw;
ecc->read_page = marvell_nfc_hw_ecc_hmg_read_page;
ecc->read_oob_raw = marvell_nfc_hw_ecc_hmg_read_oob_raw;
ecc->read_oob = ecc->read_oob_raw;
ecc->write_page_raw = marvell_nfc_hw_ecc_hmg_write_page_raw;
ecc->write_page = marvell_nfc_hw_ecc_hmg_write_page;
ecc->write_oob_raw = marvell_nfc_hw_ecc_hmg_write_oob_raw;
ecc->write_oob = ecc->write_oob_raw;
} else {
chip->ecc.algo = NAND_ECC_ALGO_BCH;
ecc->strength = 16;
ecc->read_page_raw = marvell_nfc_hw_ecc_bch_read_page_raw;
ecc->read_page = marvell_nfc_hw_ecc_bch_read_page;
ecc->read_oob_raw = marvell_nfc_hw_ecc_bch_read_oob_raw;
ecc->read_oob = marvell_nfc_hw_ecc_bch_read_oob;
ecc->write_page_raw = marvell_nfc_hw_ecc_bch_write_page_raw;
ecc->write_page = marvell_nfc_hw_ecc_bch_write_page;
ecc->write_oob_raw = marvell_nfc_hw_ecc_bch_write_oob_raw;
ecc->write_oob = marvell_nfc_hw_ecc_bch_write_oob;
}
return 0;
}
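/*
 * Select the ECC configuration: use the DT/platform values when provided,
 * otherwise fall back to the chip requirements or a 1-bit/512B default.
 */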
static int marvell_nand_ecc_init(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc)
{
struct nand_chip *chip = mtd_to_nand(mtd);
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(&chip->base);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
int ret;
if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
(!ecc->size || !ecc->strength)) {
if (requirements->step_size && requirements->strength) {
ecc->size = requirements->step_size;
ecc->strength = requirements->strength;
} else {
dev_info(nfc->dev,
"No minimum ECC strength, using 1b/512B\n");
ecc->size = 512;
ecc->strength = 1;
}
}
switch (ecc->engine_type) {
case NAND_ECC_ENGINE_TYPE_ON_HOST:
ret = marvell_nand_hw_ecc_controller_init(mtd, ecc);
if (ret)
return ret;
break;
case NAND_ECC_ENGINE_TYPE_NONE:
case NAND_ECC_ENGINE_TYPE_SOFT:
case NAND_ECC_ENGINE_TYPE_ON_DIE:
if (!nfc->caps->is_nfcv2 && mtd->writesize != SZ_512 &&
mtd->writesize != SZ_2K) {
dev_err(nfc->dev, "NFCv1 cannot write %d bytes pages\n",
mtd->writesize);
return -EINVAL;
}
break;
default:
return -EINVAL;
}
return 0;
}
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
static struct nand_bbt_descr bbt_main_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
NAND_BBT_2BIT | NAND_BBT_VERSION,
.offs = 8,
.len = 6,
.veroffs = 14,
.maxblocks = 8, /* Last 8 blocks in each chip */
.pattern = bbt_pattern
};
static struct nand_bbt_descr bbt_mirror_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
NAND_BBT_2BIT | NAND_BBT_VERSION,
.offs = 8,
.len = 6,
.veroffs = 14,
.maxblocks = 8, /* Last 8 blocks in each chip */
.pattern = bbt_mirror_pattern
};
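/*
 * Convert the requested SDR timings into controller clock cycles and cache
 * the resulting NDTR0/NDTR1 values; they are applied on the next chip
 * selection.
 */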
static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr,
const struct nand_interface_config *conf)
{
struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
unsigned int period_ns = 1000000000 / clk_get_rate(nfc->core_clk) * 2;
const struct nand_sdr_timings *sdr;
struct marvell_nfc_timings nfc_tmg;
int read_delay;
sdr = nand_get_sdr_timings(conf);
if (IS_ERR(sdr))
return PTR_ERR(sdr);
if (nfc->caps->max_mode_number && nfc->caps->max_mode_number < conf->timings.mode)
return -EOPNOTSUPP;
/*
* SDR timings are given in pico-seconds while NFC timings must be
* expressed in NAND controller clock cycles, which is half of the
* frequency of the accessible ECC clock retrieved by clk_get_rate().
* This is not written anywhere in the datasheet but was observed
* with an oscilloscope.
*
* The NFC datasheet gives equations from which those calculations are
* derived; they tend to be slightly more restrictive than the given
* core timings and may improve the overall speed.
*/
nfc_tmg.tRP = TO_CYCLES(DIV_ROUND_UP(sdr->tRC_min, 2), period_ns) - 1;
nfc_tmg.tRH = nfc_tmg.tRP;
nfc_tmg.tWP = TO_CYCLES(DIV_ROUND_UP(sdr->tWC_min, 2), period_ns) - 1;
nfc_tmg.tWH = nfc_tmg.tWP;
nfc_tmg.tCS = TO_CYCLES(sdr->tCS_min, period_ns);
nfc_tmg.tCH = TO_CYCLES(sdr->tCH_min, period_ns) - 1;
nfc_tmg.tADL = TO_CYCLES(sdr->tADL_min, period_ns);
/*
* Read delay is the time of propagation from SoC pins to NFC internal
* logic. With non-EDO timings, this is MIN_RD_DEL_CNT clock cycles. In
* EDO mode, an additional delay of tRH must be taken into account so
* the data is sampled on the falling edge instead of the rising edge.
*/
read_delay = sdr->tRC_min >= 30000 ?
MIN_RD_DEL_CNT : MIN_RD_DEL_CNT + nfc_tmg.tRH;
nfc_tmg.tAR = TO_CYCLES(sdr->tAR_min, period_ns);
/*
* tWHR and tRHW are supposed to be read-to-write delays (and vice
* versa) but in some cases, i.e. when doing a change column, they must
* be greater than that to ensure the tCCS delay is respected.
*/
nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min),
period_ns) - 2;
nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min),
period_ns);
/*
* NFCv2: Use WAIT_MODE (wait for RB line), do not rely only on delays.
* NFCv1: No WAIT_MODE, tR must be maximal.
*/
if (nfc->caps->is_nfcv2) {
nfc_tmg.tR = TO_CYCLES(sdr->tWB_max, period_ns);
} else {
nfc_tmg.tR = TO_CYCLES64(sdr->tWB_max + sdr->tR_max,
period_ns);
if (nfc_tmg.tR + 3 > nfc_tmg.tCH)
nfc_tmg.tR = nfc_tmg.tCH - 3;
else
nfc_tmg.tR = 0;
}
if (chipnr < 0)
return 0;
marvell_nand->ndtr0 =
NDTR0_TRP(nfc_tmg.tRP) |
NDTR0_TRH(nfc_tmg.tRH) |
NDTR0_ETRP(nfc_tmg.tRP) |
NDTR0_TWP(nfc_tmg.tWP) |
NDTR0_TWH(nfc_tmg.tWH) |
NDTR0_TCS(nfc_tmg.tCS) |
NDTR0_TCH(nfc_tmg.tCH);
marvell_nand->ndtr1 =
NDTR1_TAR(nfc_tmg.tAR) |
NDTR1_TWHR(nfc_tmg.tWHR) |
NDTR1_TR(nfc_tmg.tR);
if (nfc->caps->is_nfcv2) {
marvell_nand->ndtr0 |=
NDTR0_RD_CNT_DEL(read_delay) |
NDTR0_SELCNTR |
NDTR0_TADL(nfc_tmg.tADL);
marvell_nand->ndtr1 |=
NDTR1_TRHW(nfc_tmg.tRHW) |
NDTR1_WAIT_MODE;
}
/*
* Reset nfc->selected_chip so the next command will cause the timing
* registers to be updated in marvell_nfc_select_target().
*/
nfc->selected_chip = NULL;
return 0;
}
static int marvell_nand_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(nfc->dev);
int ret;
if (pdata && pdata->flash_bbt)
chip->bbt_options |= NAND_BBT_USE_FLASH;
if (chip->bbt_options & NAND_BBT_USE_FLASH) {
/*
* We'll use a bad block table stored in-flash and don't
* allow writing the bad block marker to the flash.
*/
chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
chip->bbt_td = &bbt_main_descr;
chip->bbt_md = &bbt_mirror_descr;
}
/* Save the chip-specific fields of NDCR */
marvell_nand->ndcr = NDCR_PAGE_SZ(mtd->writesize);
if (chip->options & NAND_BUSWIDTH_16)
marvell_nand->ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
/*
* On small page NANDs, only one cycle is needed to pass the
* column address.
*/
if (mtd->writesize <= 512) {
marvell_nand->addr_cyc = 1;
} else {
marvell_nand->addr_cyc = 2;
marvell_nand->ndcr |= NDCR_RA_START;
}
/*
* Now add the number of cycles needed to pass the row
* address.
*
* Addressing a chip using CS 2 or 3 should also need the third row
* cycle but due to inconsistencies in the documentation and lack of
* hardware to test this situation, this case is not supported.
*/
if (chip->options & NAND_ROW_ADDR_3)
marvell_nand->addr_cyc += 3;
else
marvell_nand->addr_cyc += 2;
if (pdata) {
chip->ecc.size = pdata->ecc_step_size;
chip->ecc.strength = pdata->ecc_strength;
}
ret = marvell_nand_ecc_init(mtd, &chip->ecc);
if (ret) {
dev_err(nfc->dev, "ECC init failed: %d\n", ret);
return ret;
}
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
/*
* Subpage write is not available with hardware ECC; also prohibit
* subpage read since, from userspace, subpage access would still be
* allowed and subpage writes, if used, would lead to numerous
* uncorrectable ECC errors.
*/
chip->options |= NAND_NO_SUBPAGE_WRITE;
}
if (pdata || nfc->caps->legacy_of_bindings) {
/*
* We keep the MTD name unchanged to avoid breaking platforms
* where the MTD cmdline parser is used and the bootloader
* has not been updated to use the new naming scheme.
*/
mtd->name = "pxa3xx_nand-0";
} else if (!mtd->name) {
/*
* If the new bindings are used and the bootloader has not been
* updated to pass a new mtdparts parameter on the cmdline, you
* should define the following property in your NAND node, e.g.:
*
* label = "main-storage";
*
* This way, mtd->name will be set by the core when
* nand_set_flash_node() is called.
*/
mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
"%s:nand.%d", dev_name(nfc->dev),
marvell_nand->sels[0].cs);
if (!mtd->name) {
dev_err(nfc->dev, "Failed to allocate mtd->name\n");
return -ENOMEM;
}
}
return 0;
}
static const struct nand_controller_ops marvell_nand_controller_ops = {
.attach_chip = marvell_nand_attach_chip,
.exec_op = marvell_nfc_exec_op,
.setup_interface = marvell_nfc_setup_interface,
};
static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
struct device_node *np)
{
struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(dev);
struct marvell_nand_chip *marvell_nand;
struct mtd_info *mtd;
struct nand_chip *chip;
int nsels, ret, i;
u32 cs, rb;
/*
* The legacy "num-cs" property indicates the number of CS on the only
* chip connected to the controller (legacy bindings do not support
* more than one chip). The CS and RB pins are always #0.
*
* When not using legacy bindings, a couple of "reg" and "nand-rb"
* properties must be filled. For each chip, expressed as a subnode,
* "reg" points to the CS lines and "nand-rb" to the RB line.
*/
if (pdata || nfc->caps->legacy_of_bindings) {
nsels = 1;
} else {
nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
if (nsels <= 0) {
dev_err(dev, "missing/invalid reg property\n");
return -EINVAL;
}
}
/* Alloc the nand chip structure */
marvell_nand = devm_kzalloc(dev,
struct_size(marvell_nand, sels, nsels),
GFP_KERNEL);
if (!marvell_nand) {
dev_err(dev, "could not allocate chip structure\n");
return -ENOMEM;
}
marvell_nand->nsels = nsels;
marvell_nand->selected_die = -1;
for (i = 0; i < nsels; i++) {
if (pdata || nfc->caps->legacy_of_bindings) {
/*
* Legacy bindings use the CS lines in natural
* order (0, 1, ...)
*/
cs = i;
} else {
/* Retrieve CS id */
ret = of_property_read_u32_index(np, "reg", i, &cs);
if (ret) {
dev_err(dev, "could not retrieve reg property: %d\n",
ret);
return ret;
}
}
if (cs >= nfc->caps->max_cs_nb) {
dev_err(dev, "invalid reg value: %u (max CS = %d)\n",
cs, nfc->caps->max_cs_nb);
return -EINVAL;
}
if (test_and_set_bit(cs, &nfc->assigned_cs)) {
dev_err(dev, "CS %d already assigned\n", cs);
return -EINVAL;
}
/*
* The cs variable represents the chip select id, which must be
* converted into bit fields for NDCB0 and NDCB2 to select the
* right chip. Unfortunately, due to a lack of information on
* the subject and incoherent documentation, the user should not
* use CS1 and CS3 at all as asserting them is not supported in
* a reliable way (due to multiplexing inside ADDR5 field).
*/
marvell_nand->sels[i].cs = cs;
switch (cs) {
case 0:
case 2:
marvell_nand->sels[i].ndcb0_csel = 0;
break;
case 1:
case 3:
marvell_nand->sels[i].ndcb0_csel = NDCB0_CSEL;
break;
default:
return -EINVAL;
}
/* Retrieve RB id */
if (pdata || nfc->caps->legacy_of_bindings) {
/* Legacy bindings always use RB #0 */
rb = 0;
} else {
ret = of_property_read_u32_index(np, "nand-rb", i,
&rb);
if (ret) {
dev_err(dev,
"could not retrieve RB property: %d\n",
ret);
return ret;
}
}
if (rb >= nfc->caps->max_rb_nb) {
dev_err(dev, "invalid reg value: %u (max RB = %d)\n",
rb, nfc->caps->max_rb_nb);
return -EINVAL;
}
marvell_nand->sels[i].rb = rb;
}
chip = &marvell_nand->chip;
chip->controller = &nfc->controller;
nand_set_flash_node(chip, np);
if (of_property_read_bool(np, "marvell,nand-keep-config"))
chip->options |= NAND_KEEP_TIMINGS;
mtd = nand_to_mtd(chip);
mtd->dev.parent = dev;
/*
* Save a reference value for timing registers before
* ->setup_interface() is called.
*/
marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0);
marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1);
chip->options |= NAND_BUSWIDTH_AUTO;
ret = nand_scan(chip, marvell_nand->nsels);
if (ret) {
dev_err(dev, "could not scan the nand chip\n");
return ret;
}
if (pdata)
/* Legacy bindings support only one chip */
ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
else
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "failed to register mtd device: %d\n", ret);
nand_cleanup(chip);
return ret;
}
list_add_tail(&marvell_nand->node, &nfc->chips);
return 0;
}
static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
{
struct marvell_nand_chip *entry, *temp;
struct nand_chip *chip;
int ret;
list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
chip = &entry->chip;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
list_del(&entry->node);
}
}
static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
{
struct device_node *np = dev->of_node;
struct device_node *nand_np;
int max_cs = nfc->caps->max_cs_nb;
int nchips;
int ret;
if (!np)
nchips = 1;
else
nchips = of_get_child_count(np);
if (nchips > max_cs) {
dev_err(dev, "too many NAND chips: %d (max = %d CS)\n", nchips,
max_cs);
return -EINVAL;
}
/*
* Legacy bindings do not use child nodes to exhibit NAND chip
* properties and layout. Instead, NAND properties are mixed with the
* controller ones, and partitions are defined as direct subnodes of the
* NAND controller node.
*/
if (nfc->caps->legacy_of_bindings) {
ret = marvell_nand_chip_init(dev, nfc, np);
return ret;
}
for_each_child_of_node(np, nand_np) {
ret = marvell_nand_chip_init(dev, nfc, nand_np);
if (ret) {
of_node_put(nand_np);
goto cleanup_chips;
}
}
return 0;
cleanup_chips:
marvell_nand_chips_cleanup(nfc);
return ret;
}
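/*
 * Legacy NFCv1 DMA setup: request the "data" channel, point it at the NDDB
 * FIFO and allocate a bounce buffer so transfer lengths can be rounded up.
 */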
static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
{
struct platform_device *pdev = container_of(nfc->dev,
struct platform_device,
dev);
struct dma_slave_config config = {};
struct resource *r;
int ret;
if (!IS_ENABLED(CONFIG_PXA_DMA)) {
dev_warn(nfc->dev,
"DMA not enabled in configuration\n");
return -ENOTSUPP;
}
ret = dma_set_mask_and_coherent(nfc->dev, DMA_BIT_MASK(32));
if (ret)
return ret;
nfc->dma_chan = dma_request_chan(nfc->dev, "data");
if (IS_ERR(nfc->dma_chan)) {
ret = PTR_ERR(nfc->dma_chan);
nfc->dma_chan = NULL;
return dev_err_probe(nfc->dev, ret, "DMA channel request failed\n");
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
ret = -ENXIO;
goto release_channel;
}
config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
config.src_addr = r->start + NDDB;
config.dst_addr = r->start + NDDB;
config.src_maxburst = 32;
config.dst_maxburst = 32;
ret = dmaengine_slave_config(nfc->dma_chan, &config);
if (ret < 0) {
dev_err(nfc->dev, "Failed to configure DMA channel\n");
goto release_channel;
}
/*
* DMA must act on a length that is a multiple of 32 and this length may
* be bigger than the destination buffer. Use this buffer instead
* for DMA transfers and then copy the desired amount of data to
* the provided buffer.
*/
nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
if (!nfc->dma_buf) {
ret = -ENOMEM;
goto release_channel;
}
nfc->use_dma = true;
return 0;
release_channel:
dma_release_channel(nfc->dma_chan);
nfc->dma_chan = NULL;
return ret;
}
static void marvell_nfc_reset(struct marvell_nfc *nfc)
{
/*
* ECC operations and interrupts are only enabled when specifically
* needed. ECC shall not be activated in the early stages (it makes probe
* fail). The arbiter flag, even if marked as "reserved", must be set
* (empirical). The SPARE_EN bit must always be set or the ECC bytes will
* not be at the same offset in the read page, which breaks the protection.
*/
writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
writel_relaxed(0, nfc->regs + NDECCCTRL);
}
static int marvell_nfc_init(struct marvell_nfc *nfc)
{
struct device_node *np = nfc->dev->of_node;
/*
* On some SoCs like A7k/A8k, the NAND controller, gated clocks and reset
* bits must be enabled manually to avoid depending on the bootloader.
* This is done through the System Functions registers.
*/
if (nfc->caps->need_system_controller) {
struct regmap *sysctrl_base =
syscon_regmap_lookup_by_phandle(np,
"marvell,system-controller");
if (IS_ERR(sysctrl_base))
return PTR_ERR(sysctrl_base);
regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX,
GENCONF_SOC_DEVICE_MUX_NFC_EN |
GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST |
GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST |
GENCONF_SOC_DEVICE_MUX_NFC_INT_EN |
GENCONF_SOC_DEVICE_MUX_NFC_DEVBUS_ARB_EN);
regmap_update_bits(sysctrl_base, GENCONF_CLK_GATING_CTRL,
GENCONF_CLK_GATING_CTRL_ND_GATE,
GENCONF_CLK_GATING_CTRL_ND_GATE);
}
/* Configure the DMA if appropriate */
if (!nfc->caps->is_nfcv2)
marvell_nfc_init_dma(nfc);
marvell_nfc_reset(nfc);
return 0;
}
static int marvell_nfc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct marvell_nfc *nfc;
int ret;
int irq;
nfc = devm_kzalloc(&pdev->dev, sizeof(struct marvell_nfc),
GFP_KERNEL);
if (!nfc)
return -ENOMEM;
nfc->dev = dev;
nand_controller_init(&nfc->controller);
nfc->controller.ops = &marvell_nand_controller_ops;
INIT_LIST_HEAD(&nfc->chips);
nfc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nfc->regs))
return PTR_ERR(nfc->regs);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
nfc->core_clk = devm_clk_get(&pdev->dev, "core");
/* Manage the legacy case (when the first clock was not named) */
if (nfc->core_clk == ERR_PTR(-ENOENT))
nfc->core_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(nfc->core_clk))
return PTR_ERR(nfc->core_clk);
ret = clk_prepare_enable(nfc->core_clk);
if (ret)
return ret;
nfc->reg_clk = devm_clk_get(&pdev->dev, "reg");
if (IS_ERR(nfc->reg_clk)) {
if (PTR_ERR(nfc->reg_clk) != -ENOENT) {
ret = PTR_ERR(nfc->reg_clk);
goto unprepare_core_clk;
}
nfc->reg_clk = NULL;
}
ret = clk_prepare_enable(nfc->reg_clk);
if (ret)
goto unprepare_core_clk;
marvell_nfc_disable_int(nfc, NDCR_ALL_INT);
marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
ret = devm_request_irq(dev, irq, marvell_nfc_isr,
0, "marvell-nfc", nfc);
if (ret)
goto unprepare_reg_clk;
/* Get NAND controller capabilities */
if (pdev->id_entry)
nfc->caps = (void *)pdev->id_entry->driver_data;
else
nfc->caps = of_device_get_match_data(&pdev->dev);
if (!nfc->caps) {
dev_err(dev, "Could not retrieve NFC caps\n");
ret = -EINVAL;
goto unprepare_reg_clk;
}
/* Init the controller and then probe the chips */
ret = marvell_nfc_init(nfc);
if (ret)
goto unprepare_reg_clk;
platform_set_drvdata(pdev, nfc);
ret = marvell_nand_chips_init(dev, nfc);
if (ret)
goto release_dma;
return 0;
release_dma:
if (nfc->use_dma)
dma_release_channel(nfc->dma_chan);
unprepare_reg_clk:
clk_disable_unprepare(nfc->reg_clk);
unprepare_core_clk:
clk_disable_unprepare(nfc->core_clk);
return ret;
}
static void marvell_nfc_remove(struct platform_device *pdev)
{
struct marvell_nfc *nfc = platform_get_drvdata(pdev);
marvell_nand_chips_cleanup(nfc);
if (nfc->use_dma) {
dmaengine_terminate_all(nfc->dma_chan);
dma_release_channel(nfc->dma_chan);
}
clk_disable_unprepare(nfc->reg_clk);
clk_disable_unprepare(nfc->core_clk);
}
static int __maybe_unused marvell_nfc_suspend(struct device *dev)
{
struct marvell_nfc *nfc = dev_get_drvdata(dev);
struct marvell_nand_chip *chip;
list_for_each_entry(chip, &nfc->chips, node)
marvell_nfc_wait_ndrun(&chip->chip);
clk_disable_unprepare(nfc->reg_clk);
clk_disable_unprepare(nfc->core_clk);
return 0;
}
static int __maybe_unused marvell_nfc_resume(struct device *dev)
{
struct marvell_nfc *nfc = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(nfc->core_clk);
if (ret < 0)
return ret;
ret = clk_prepare_enable(nfc->reg_clk);
if (ret < 0) {
clk_disable_unprepare(nfc->core_clk);
return ret;
}
/*
* Reset nfc->selected_chip so the next command will cause the timing
* registers to be restored in marvell_nfc_select_target().
*/
nfc->selected_chip = NULL;
/* Reset registers that have lost their contents */
marvell_nfc_reset(nfc);
return 0;
}
static const struct dev_pm_ops marvell_nfc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(marvell_nfc_suspend, marvell_nfc_resume)
};
static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
.max_cs_nb = 4,
.max_rb_nb = 2,
.need_system_controller = true,
.is_nfcv2 = true,
};
static const struct marvell_nfc_caps marvell_ac5_caps = {
.max_cs_nb = 2,
.max_rb_nb = 1,
.is_nfcv2 = true,
.max_mode_number = 3,
};
static const struct marvell_nfc_caps marvell_armada370_nfc_caps = {
.max_cs_nb = 4,
.max_rb_nb = 2,
.is_nfcv2 = true,
};
static const struct marvell_nfc_caps marvell_pxa3xx_nfc_caps = {
.max_cs_nb = 2,
.max_rb_nb = 1,
.use_dma = true,
};
static const struct marvell_nfc_caps marvell_armada_8k_nfc_legacy_caps = {
.max_cs_nb = 4,
.max_rb_nb = 2,
.need_system_controller = true,
.legacy_of_bindings = true,
.is_nfcv2 = true,
};
static const struct marvell_nfc_caps marvell_armada370_nfc_legacy_caps = {
.max_cs_nb = 4,
.max_rb_nb = 2,
.legacy_of_bindings = true,
.is_nfcv2 = true,
};
static const struct marvell_nfc_caps marvell_pxa3xx_nfc_legacy_caps = {
.max_cs_nb = 2,
.max_rb_nb = 1,
.legacy_of_bindings = true,
.use_dma = true,
};
static const struct platform_device_id marvell_nfc_platform_ids[] = {
{
.name = "pxa3xx-nand",
.driver_data = (kernel_ulong_t)&marvell_pxa3xx_nfc_legacy_caps,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, marvell_nfc_platform_ids);
static const struct of_device_id marvell_nfc_of_ids[] = {
{
.compatible = "marvell,armada-8k-nand-controller",
.data = &marvell_armada_8k_nfc_caps,
},
{
.compatible = "marvell,ac5-nand-controller",
.data = &marvell_ac5_caps,
},
{
.compatible = "marvell,armada370-nand-controller",
.data = &marvell_armada370_nfc_caps,
},
{
.compatible = "marvell,pxa3xx-nand-controller",
.data = &marvell_pxa3xx_nfc_caps,
},
/* Support for old/deprecated bindings: */
{
.compatible = "marvell,armada-8k-nand",
.data = &marvell_armada_8k_nfc_legacy_caps,
},
{
.compatible = "marvell,armada370-nand",
.data = &marvell_armada370_nfc_legacy_caps,
},
{
.compatible = "marvell,pxa3xx-nand",
.data = &marvell_pxa3xx_nfc_legacy_caps,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, marvell_nfc_of_ids);
static struct platform_driver marvell_nfc_driver = {
.driver = {
.name = "marvell-nfc",
.of_match_table = marvell_nfc_of_ids,
.pm = &marvell_nfc_pm_ops,
},
.id_table = marvell_nfc_platform_ids,
.probe = marvell_nfc_probe,
.remove_new = marvell_nfc_remove,
};
module_platform_driver(marvell_nfc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Marvell NAND controller driver");
| linux-master | drivers/mtd/nand/raw/marvell_nand.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale UPM NAND driver.
*
* Copyright © 2007-2008 MontaVista Software, Inc.
*
* Author: Anton Vorontsov <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/mtd.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/fsl_lbc.h>
struct fsl_upm_nand {
struct nand_controller base;
struct device *dev;
struct nand_chip chip;
struct fsl_upm upm;
uint8_t upm_addr_offset;
uint8_t upm_cmd_offset;
void __iomem *io_base;
struct gpio_desc *rnb_gpio[NAND_MAX_CHIPS];
uint32_t mchip_offsets[NAND_MAX_CHIPS];
uint32_t mchip_count;
uint32_t mchip_number;
};
static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
{
return container_of(mtd_to_nand(mtdinfo), struct fsl_upm_nand,
chip);
}
static int fun_chip_init(struct fsl_upm_nand *fun,
const struct device_node *upm_np,
const struct resource *io_res)
{
struct mtd_info *mtd = nand_to_mtd(&fun->chip);
int ret;
struct device_node *flash_np;
fun->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
fun->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
fun->chip.controller = &fun->base;
mtd->dev.parent = fun->dev;
flash_np = of_get_next_child(upm_np, NULL);
if (!flash_np)
return -ENODEV;
nand_set_flash_node(&fun->chip, flash_np);
mtd->name = devm_kasprintf(fun->dev, GFP_KERNEL, "0x%llx.%pOFn",
(u64)io_res->start,
flash_np);
if (!mtd->name) {
ret = -ENOMEM;
goto err;
}
ret = nand_scan(&fun->chip, fun->mchip_count);
if (ret)
goto err;
ret = mtd_device_register(mtd, NULL, 0);
err:
of_node_put(flash_np);
return ret;
}
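/* Run a single NAND instruction through the UPM patterns or plain I/O. */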
static int func_exec_instr(struct nand_chip *chip,
const struct nand_op_instr *instr)
{
struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
u32 mar, reg_offs = fun->mchip_offsets[fun->mchip_number];
unsigned int i;
const u8 *out;
u8 *in;
switch (instr->type) {
case NAND_OP_CMD_INSTR:
fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset);
mar = (instr->ctx.cmd.opcode << (32 - fun->upm.width)) |
reg_offs;
fsl_upm_run_pattern(&fun->upm, fun->io_base + reg_offs, mar);
fsl_upm_end_pattern(&fun->upm);
return 0;
case NAND_OP_ADDR_INSTR:
fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset);
for (i = 0; i < instr->ctx.addr.naddrs; i++) {
mar = (instr->ctx.addr.addrs[i] << (32 - fun->upm.width)) |
reg_offs;
fsl_upm_run_pattern(&fun->upm, fun->io_base + reg_offs, mar);
}
fsl_upm_end_pattern(&fun->upm);
return 0;
case NAND_OP_DATA_IN_INSTR:
in = instr->ctx.data.buf.in;
for (i = 0; i < instr->ctx.data.len; i++)
in[i] = in_8(fun->io_base + reg_offs);
return 0;
case NAND_OP_DATA_OUT_INSTR:
out = instr->ctx.data.buf.out;
for (i = 0; i < instr->ctx.data.len; i++)
out_8(fun->io_base + reg_offs, out[i]);
return 0;
case NAND_OP_WAITRDY_INSTR:
if (!fun->rnb_gpio[fun->mchip_number])
return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
return nand_gpio_waitrdy(chip, fun->rnb_gpio[fun->mchip_number],
instr->ctx.waitrdy.timeout_ms);
default:
return -EINVAL;
}
return 0;
}
static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op,
bool check_only)
{
struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
unsigned int i;
int ret;
if (op->cs >= NAND_MAX_CHIPS)
return -EINVAL;
if (check_only)
return 0;
fun->mchip_number = op->cs;
for (i = 0; i < op->ninstrs; i++) {
ret = func_exec_instr(chip, &op->instrs[i]);
if (ret)
return ret;
if (op->instrs[i].delay_ns)
ndelay(op->instrs[i].delay_ns);
}
return 0;
}
static const struct nand_controller_ops fun_ops = {
.exec_op = fun_exec_op,
};
static int fun_probe(struct platform_device *ofdev)
{
struct fsl_upm_nand *fun;
struct resource *io_res;
const __be32 *prop;
int ret;
int size;
int i;
fun = devm_kzalloc(&ofdev->dev, sizeof(*fun), GFP_KERNEL);
if (!fun)
return -ENOMEM;
fun->io_base = devm_platform_get_and_ioremap_resource(ofdev, 0, &io_res);
if (IS_ERR(fun->io_base))
return PTR_ERR(fun->io_base);
ret = fsl_upm_find(io_res->start, &fun->upm);
if (ret) {
dev_err(&ofdev->dev, "can't find UPM\n");
return ret;
}
prop = of_get_property(ofdev->dev.of_node, "fsl,upm-addr-offset",
&size);
if (!prop || size != sizeof(uint32_t)) {
dev_err(&ofdev->dev, "can't get UPM address offset\n");
return -EINVAL;
}
fun->upm_addr_offset = *prop;
prop = of_get_property(ofdev->dev.of_node, "fsl,upm-cmd-offset", &size);
if (!prop || size != sizeof(uint32_t)) {
dev_err(&ofdev->dev, "can't get UPM command offset\n");
return -EINVAL;
}
fun->upm_cmd_offset = *prop;
prop = of_get_property(ofdev->dev.of_node,
"fsl,upm-addr-line-cs-offsets", &size);
if (prop && (size / sizeof(uint32_t)) > 0) {
fun->mchip_count = size / sizeof(uint32_t);
if (fun->mchip_count >= NAND_MAX_CHIPS) {
dev_err(&ofdev->dev, "too many chips\n");
return -EINVAL;
}
for (i = 0; i < fun->mchip_count; i++)
fun->mchip_offsets[i] = be32_to_cpu(prop[i]);
} else {
fun->mchip_count = 1;
}
for (i = 0; i < fun->mchip_count; i++) {
fun->rnb_gpio[i] = devm_gpiod_get_index_optional(&ofdev->dev,
NULL, i,
GPIOD_IN);
if (IS_ERR(fun->rnb_gpio[i])) {
dev_err(&ofdev->dev, "RNB gpio #%d is invalid\n", i);
return PTR_ERR(fun->rnb_gpio[i]);
}
}
nand_controller_init(&fun->base);
fun->base.ops = &fun_ops;
fun->dev = &ofdev->dev;
ret = fun_chip_init(fun, ofdev->dev.of_node, io_res);
if (ret)
return ret;
dev_set_drvdata(&ofdev->dev, fun);
return 0;
}
static void fun_remove(struct platform_device *ofdev)
{
struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
struct nand_chip *chip = &fun->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
ret = mtd_device_unregister(mtd);
WARN_ON(ret);
nand_cleanup(chip);
}
static const struct of_device_id of_fun_match[] = {
{ .compatible = "fsl,upm-nand" },
{},
};
MODULE_DEVICE_TABLE(of, of_fun_match);
static struct platform_driver of_fun_driver = {
.driver = {
.name = "fsl,upm-nand",
.of_match_table = of_fun_match,
},
.probe = fun_probe,
.remove_new = fun_remove,
};
module_platform_driver(of_fun_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anton Vorontsov <[email protected]>");
MODULE_DESCRIPTION("Driver for NAND chips working through Freescale "
"LocalBus User-Programmable Machine");
| linux-master | drivers/mtd/nand/raw/fsl_upm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* davinci_nand.c - NAND Flash Driver for DaVinci family chips
*
* Copyright © 2006 Texas Instruments.
*
* Port to 2.6.23 Copyright © 2008 by:
* Sander Huijsen <[email protected]>
* Troy Kisky <[email protected]>
* Dirk Behme <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>
/*
* This is a device driver for the NAND flash controller found on the
* various DaVinci family chips. It handles up to four SoC chipselects,
* and some flavors of secondary chipselect (e.g. based on A12) as used
* with multichip packages.
*
* The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
* available on chips like the DM355 and OMAP-L137 and needed with the
* more error-prone MLC NAND chips.
*
* This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
* outputs in a "wire-AND" configuration, with no per-chip signals.
*/
struct davinci_nand_info {
struct nand_controller controller;
struct nand_chip chip;
struct platform_device *pdev;
bool is_readmode;
void __iomem *base;
void __iomem *vaddr;
void __iomem *current_cs;
uint32_t mask_chipsel;
uint32_t mask_ale;
uint32_t mask_cle;
uint32_t core_chipsel;
struct davinci_aemif_timing *timing;
};
static DEFINE_SPINLOCK(davinci_nand_lock);
static bool ecc4_busy;
static inline struct davinci_nand_info *to_davinci_nand(struct mtd_info *mtd)
{
return container_of(mtd_to_nand(mtd), struct davinci_nand_info, chip);
}
static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
int offset)
{
return __raw_readl(info->base + offset);
}
static inline void davinci_nand_writel(struct davinci_nand_info *info,
int offset, unsigned long value)
{
__raw_writel(value, info->base + offset);
}
/*----------------------------------------------------------------------*/
/*
* 1-bit hardware ECC ... context maintained for each core chipselect
*/
static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
{
struct davinci_nand_info *info = to_davinci_nand(mtd);
return davinci_nand_readl(info, NANDF1ECC_OFFSET
+ 4 * info->core_chipsel);
}
static void nand_davinci_hwctl_1bit(struct nand_chip *chip, int mode)
{
struct davinci_nand_info *info;
uint32_t nandcfr;
unsigned long flags;
info = to_davinci_nand(nand_to_mtd(chip));
/* Reset ECC hardware */
nand_davinci_readecc_1bit(nand_to_mtd(chip));
spin_lock_irqsave(&davinci_nand_lock, flags);
/* Restart ECC hardware */
nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
nandcfr |= BIT(8 + info->core_chipsel);
davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);
spin_unlock_irqrestore(&davinci_nand_lock, flags);
}
/*
* Read hardware ECC value and pack into three bytes
*/
static int nand_davinci_calculate_1bit(struct nand_chip *chip,
const u_char *dat, u_char *ecc_code)
{
unsigned int ecc_val = nand_davinci_readecc_1bit(nand_to_mtd(chip));
unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);
/* invert so that erased block ecc is correct */
ecc24 = ~ecc24;
ecc_code[0] = (u_char)(ecc24);
ecc_code[1] = (u_char)(ecc24 >> 8);
ecc_code[2] = (u_char)(ecc24 >> 16);
return 0;
}
static int nand_davinci_correct_1bit(struct nand_chip *chip, u_char *dat,
u_char *read_ecc, u_char *calc_ecc)
{
uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
(read_ecc[2] << 16);
uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
(calc_ecc[2] << 16);
uint32_t diff = eccCalc ^ eccNand;
if (diff) {
if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
/* Correctable error */
if ((diff >> (12 + 3)) < chip->ecc.size) {
dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
return 1;
} else {
return -EBADMSG;
}
} else if (!(diff & (diff - 1))) {
/* Single bit ECC error in the ECC itself,
* nothing to fix */
return 1;
} else {
/* Uncorrectable error */
return -EBADMSG;
}
}
return 0;
}
/*----------------------------------------------------------------------*/
/*
* 4-bit hardware ECC ... context maintained over entire AEMIF
*
* This is a syndrome engine, but we avoid NAND_ECC_PLACEMENT_INTERLEAVED
* since that forces use of a problematic "infix OOB" layout.
* Among other things, it trashes manufacturer bad block markers.
* Also, and specific to this hardware, it ECC-protects the "prepad"
* in the OOB ... while having ECC protection for parts of OOB would
* seem useful, the current MTD stack sometimes wants to update the
* OOB without recomputing ECC.
*/
static void nand_davinci_hwctl_4bit(struct nand_chip *chip, int mode)
{
struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
unsigned long flags;
u32 val;
/* Reset ECC hardware */
davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
spin_lock_irqsave(&davinci_nand_lock, flags);
/* Start 4-bit ECC calculation for read/write */
val = davinci_nand_readl(info, NANDFCR_OFFSET);
val &= ~(0x03 << 4);
val |= (info->core_chipsel << 4) | BIT(12);
davinci_nand_writel(info, NANDFCR_OFFSET, val);
info->is_readmode = (mode == NAND_ECC_READ);
spin_unlock_irqrestore(&davinci_nand_lock, flags);
}
/* Read raw ECC code after writing to NAND. */
static void
nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
{
const u32 mask = 0x03ff03ff;
code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
}
/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
static int nand_davinci_calculate_4bit(struct nand_chip *chip,
const u_char *dat, u_char *ecc_code)
{
struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
u32 raw_ecc[4], *p;
unsigned i;
/* After a read, terminate ECC calculation by a dummy read
* of some 4-bit ECC register. ECC covers everything that
* was read; correct() just uses the hardware state, so
* ecc_code is not needed.
*/
if (info->is_readmode) {
davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
return 0;
}
/* Pack eight raw 10-bit ecc values into ten bytes, making
* two passes which each convert four values (in upper and
* lower halves of two 32-bit words) into five bytes. The
* ROM boot loader uses this same packing scheme.
*/
nand_davinci_readecc_4bit(info, raw_ecc);
for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
*ecc_code++ = p[0] & 0xff;
*ecc_code++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
*ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
*ecc_code++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
*ecc_code++ = (p[1] >> 18) & 0xff;
}
return 0;
}
/* Correct up to 4 bits in data we just read, using state left in the
* hardware plus the ecc_code computed when it was first written.
*/
static int nand_davinci_correct_4bit(struct nand_chip *chip, u_char *data,
u_char *ecc_code, u_char *null)
{
int i;
struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
unsigned short ecc10[8];
unsigned short *ecc16;
u32 syndrome[4];
u32 ecc_state;
unsigned num_errors, corrected;
unsigned long timeo;
/* Unpack ten bytes into eight 10 bit values. We know we're
* little-endian, and use type punning for less shifting/masking.
*/
if (WARN_ON(0x01 & (uintptr_t)ecc_code))
return -EINVAL;
ecc16 = (unsigned short *)ecc_code;
ecc10[0] = (ecc16[0] >> 0) & 0x3ff;
ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
ecc10[2] = (ecc16[1] >> 4) & 0x3ff;
ecc10[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
ecc10[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
ecc10[5] = (ecc16[3] >> 2) & 0x3ff;
ecc10[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
ecc10[7] = (ecc16[4] >> 6) & 0x3ff;
/* Tell ECC controller about the expected ECC codes. */
for (i = 7; i >= 0; i--)
davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);
/* Allow time for syndrome calculation ... then read it.
* A syndrome of all zeroes means no detected errors.
*/
davinci_nand_readl(info, NANDFSR_OFFSET);
nand_davinci_readecc_4bit(info, syndrome);
if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
return 0;
/*
* Clear any previous address calculation by doing a dummy read of an
* error address register.
*/
davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);
/* Start address calculation, and wait for it to complete.
* We _could_ start reading more data while this is working,
* to speed up the overall page read.
*/
davinci_nand_writel(info, NANDFCR_OFFSET,
davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
/*
* ECC_STATE field reads 0x3 (Error correction complete) immediately
* after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
* begin trying to poll for the state, you may fall right out of your
* loop without any of the correction calculations having taken place.
* The recommendation from the hardware team is to initially delay as
* long as ECC_STATE reads less than 4. After that, ECC HW has entered
* correction state.
*/
timeo = jiffies + usecs_to_jiffies(100);
do {
ecc_state = (davinci_nand_readl(info,
NANDFSR_OFFSET) >> 8) & 0x0f;
cpu_relax();
} while ((ecc_state < 4) && time_before(jiffies, timeo));
for (;;) {
u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
switch ((fsr >> 8) & 0x0f) {
case 0: /* no error, should not happen */
davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
return 0;
case 1: /* five or more errors detected */
davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
return -EBADMSG;
case 2: /* error addresses computed */
case 3:
num_errors = 1 + ((fsr >> 16) & 0x03);
goto correct;
default: /* still working on it */
cpu_relax();
continue;
}
}
correct:
/* correct each error */
for (i = 0, corrected = 0; i < num_errors; i++) {
int error_address, error_value;
if (i > 1) {
error_address = davinci_nand_readl(info,
NAND_ERR_ADD2_OFFSET);
error_value = davinci_nand_readl(info,
NAND_ERR_ERRVAL2_OFFSET);
} else {
error_address = davinci_nand_readl(info,
NAND_ERR_ADD1_OFFSET);
error_value = davinci_nand_readl(info,
NAND_ERR_ERRVAL1_OFFSET);
}
if (i & 1) {
error_address >>= 16;
error_value >>= 16;
}
error_address &= 0x3ff;
error_address = (512 + 7) - error_address;
if (error_address < 512) {
data[error_address] ^= error_value;
corrected++;
}
}
return corrected;
}
/*----------------------------------------------------------------------*/
/* An ECC layout for using 4-bit ECC with small-page flash, storing
* ten ECC bytes plus the manufacturer's bad block marker byte, and
* not overlapping the default BBT markers.
*/
static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section > 2)
return -ERANGE;
if (!section) {
oobregion->offset = 0;
oobregion->length = 5;
} else if (section == 1) {
oobregion->offset = 6;
oobregion->length = 2;
} else {
oobregion->offset = 13;
oobregion->length = 3;
}
return 0;
}
static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section > 1)
return -ERANGE;
if (!section) {
oobregion->offset = 8;
oobregion->length = 5;
} else {
oobregion->offset = 16;
oobregion->length = mtd->oobsize - 16;
}
return 0;
}
static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
.ecc = hwecc4_ooblayout_small_ecc,
.free = hwecc4_ooblayout_small_free,
};
#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
{.compatible = "ti,davinci-nand", },
{.compatible = "ti,keystone-nand", },
{},
};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
static struct davinci_nand_pdata
*nand_davinci_get_pdata(struct platform_device *pdev)
{
if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
struct davinci_nand_pdata *pdata;
const char *mode;
u32 prop;
pdata = devm_kzalloc(&pdev->dev,
sizeof(struct davinci_nand_pdata),
GFP_KERNEL);
pdev->dev.platform_data = pdata;
if (!pdata)
return ERR_PTR(-ENOMEM);
if (!of_property_read_u32(pdev->dev.of_node,
"ti,davinci-chipselect", &prop))
pdata->core_chipsel = prop;
else
return ERR_PTR(-EINVAL);
if (!of_property_read_u32(pdev->dev.of_node,
"ti,davinci-mask-ale", &prop))
pdata->mask_ale = prop;
if (!of_property_read_u32(pdev->dev.of_node,
"ti,davinci-mask-cle", &prop))
pdata->mask_cle = prop;
if (!of_property_read_u32(pdev->dev.of_node,
"ti,davinci-mask-chipsel", &prop))
pdata->mask_chipsel = prop;
if (!of_property_read_string(pdev->dev.of_node,
"ti,davinci-ecc-mode", &mode)) {
if (!strncmp("none", mode, 4))
pdata->engine_type = NAND_ECC_ENGINE_TYPE_NONE;
if (!strncmp("soft", mode, 4))
pdata->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
if (!strncmp("hw", mode, 2))
pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
}
if (!of_property_read_u32(pdev->dev.of_node,
"ti,davinci-ecc-bits", &prop))
pdata->ecc_bits = prop;
if (!of_property_read_u32(pdev->dev.of_node,
"ti,davinci-nand-buswidth", &prop) && prop == 16)
pdata->options |= NAND_BUSWIDTH_16;
if (of_property_read_bool(pdev->dev.of_node,
"ti,davinci-nand-use-bbt"))
pdata->bbt_options = NAND_BBT_USE_FLASH;
/*
* Since kernel v4.8, this driver has been fixed to enable
* use of 4-bit hardware ECC with subpages and verified on
* TI's keystone EVMs (K2L, K2HK and K2E).
* However, in the interest of not breaking systems using
* existing UBI partitions, sub-page writes are not being
* (re)enabled. If you want to use subpage writes on Keystone
* platforms (i.e. do not have any existing UBI partitions),
* then use "ti,davinci-nand" as the compatible in your
* device-tree file.
*/
if (of_device_is_compatible(pdev->dev.of_node,
"ti,keystone-nand")) {
pdata->options |= NAND_NO_SUBPAGE_WRITE;
}
}
return dev_get_platdata(&pdev->dev);
}
#else
static struct davinci_nand_pdata
*nand_davinci_get_pdata(struct platform_device *pdev)
{
return dev_get_platdata(&pdev->dev);
}
#endif
static int davinci_nand_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct davinci_nand_info *info = to_davinci_nand(mtd);
struct davinci_nand_pdata *pdata = nand_davinci_get_pdata(info->pdev);
int ret = 0;
if (IS_ERR(pdata))
return PTR_ERR(pdata);
/* Use board-specific ECC config */
chip->ecc.engine_type = pdata->engine_type;
chip->ecc.placement = pdata->ecc_placement;
switch (chip->ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_NONE:
pdata->ecc_bits = 0;
break;
case NAND_ECC_ENGINE_TYPE_SOFT:
pdata->ecc_bits = 0;
/*
* This driver expects Hamming based ECC when engine_type is set
* to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to
* NAND_ECC_ALGO_HAMMING to avoid adding an extra ->ecc_algo
* field to davinci_nand_pdata.
*/
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
break;
case NAND_ECC_ENGINE_TYPE_ON_HOST:
if (pdata->ecc_bits == 4) {
int chunks = mtd->writesize / 512;
if (!chunks || mtd->oobsize < 16) {
dev_dbg(&info->pdev->dev, "too small\n");
return -EINVAL;
}
/*
* No sanity checks: CPUs must support this,
* and the chips may not use NAND_BUSWIDTH_16.
*/
/* No sharing 4-bit hardware between chipselects yet */
spin_lock_irq(&davinci_nand_lock);
if (ecc4_busy)
ret = -EBUSY;
else
ecc4_busy = true;
spin_unlock_irq(&davinci_nand_lock);
if (ret == -EBUSY)
return ret;
chip->ecc.calculate = nand_davinci_calculate_4bit;
chip->ecc.correct = nand_davinci_correct_4bit;
chip->ecc.hwctl = nand_davinci_hwctl_4bit;
chip->ecc.bytes = 10;
chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
chip->ecc.algo = NAND_ECC_ALGO_BCH;
/*
* Update ECC layout if needed ... for 1-bit HW ECC, the
* default is OK, but it allocates 6 bytes when only 3
* are needed (for each 512 bytes). For 4-bit HW ECC,
* the default is not usable: 10 bytes needed, not 6.
*
* For small page chips, preserve the manufacturer's
* badblock marking data ... and make sure a flash BBT
* table marker fits in the free bytes.
*/
if (chunks == 1) {
mtd_set_ooblayout(mtd,
&hwecc4_small_ooblayout_ops);
} else if (chunks == 4 || chunks == 8) {
mtd_set_ooblayout(mtd,
nand_get_large_page_ooblayout());
chip->ecc.read_page = nand_read_page_hwecc_oob_first;
} else {
return -EIO;
}
} else {
/* 1bit ecc hamming */
chip->ecc.calculate = nand_davinci_calculate_1bit;
chip->ecc.correct = nand_davinci_correct_1bit;
chip->ecc.hwctl = nand_davinci_hwctl_1bit;
chip->ecc.bytes = 3;
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
}
chip->ecc.size = 512;
chip->ecc.strength = pdata->ecc_bits;
break;
default:
return -EINVAL;
}
return ret;
}
static void nand_davinci_data_in(struct davinci_nand_info *info, void *buf,
unsigned int len, bool force_8bit)
{
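/*
* Pick the widest MMIO access that both the buffer address and the
* length allow: forced or odd-aligned transfers use byte reads,
* 2-byte-aligned ones use 16-bit reads, the rest use 32-bit reads.
*/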
u32 alignment = ((uintptr_t)buf | len) & 3;
if (force_8bit || (alignment & 1))
ioread8_rep(info->current_cs, buf, len);
else if (alignment & 3)
ioread16_rep(info->current_cs, buf, len >> 1);
else
ioread32_rep(info->current_cs, buf, len >> 2);
}
static void nand_davinci_data_out(struct davinci_nand_info *info,
const void *buf, unsigned int len,
bool force_8bit)
{
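/* Same access-width selection as nand_davinci_data_in(), but for writes */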
u32 alignment = ((uintptr_t)buf | len) & 3;
if (force_8bit || (alignment & 1))
iowrite8_rep(info->current_cs, buf, len);
else if (alignment & 3)
iowrite16_rep(info->current_cs, buf, len >> 1);
else
iowrite32_rep(info->current_cs, buf, len >> 2);
}
static int davinci_nand_exec_instr(struct davinci_nand_info *info,
const struct nand_op_instr *instr)
{
unsigned int i, timeout_us;
u32 status;
int ret;
switch (instr->type) {
case NAND_OP_CMD_INSTR:
iowrite8(instr->ctx.cmd.opcode,
info->current_cs + info->mask_cle);
break;
case NAND_OP_ADDR_INSTR:
for (i = 0; i < instr->ctx.addr.naddrs; i++) {
iowrite8(instr->ctx.addr.addrs[i],
info->current_cs + info->mask_ale);
}
break;
case NAND_OP_DATA_IN_INSTR:
nand_davinci_data_in(info, instr->ctx.data.buf.in,
instr->ctx.data.len,
instr->ctx.data.force_8bit);
break;
case NAND_OP_DATA_OUT_INSTR:
nand_davinci_data_out(info, instr->ctx.data.buf.out,
instr->ctx.data.len,
instr->ctx.data.force_8bit);
break;
case NAND_OP_WAITRDY_INSTR:
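/* Poll the ready bit (bit 0) of NANDFSR until it is set or the timeout expires */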
timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
ret = readl_relaxed_poll_timeout(info->base + NANDFSR_OFFSET,
status, status & BIT(0), 100,
timeout_us);
if (ret)
return ret;
break;
}
if (instr->delay_ns)
ndelay(instr->delay_ns);
return 0;
}
static int davinci_nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
unsigned int i;
if (check_only)
return 0;
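/* Each chip select's data window is mask_chipsel bytes apart from the base */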
info->current_cs = info->vaddr + (op->cs * info->mask_chipsel);
for (i = 0; i < op->ninstrs; i++) {
int ret;
ret = davinci_nand_exec_instr(info, &op->instrs[i]);
if (ret)
return ret;
}
return 0;
}
static const struct nand_controller_ops davinci_nand_controller_ops = {
.attach_chip = davinci_nand_attach_chip,
.exec_op = davinci_nand_exec_op,
};
static int nand_davinci_probe(struct platform_device *pdev)
{
struct davinci_nand_pdata *pdata;
struct davinci_nand_info *info;
struct resource *res1;
struct resource *res2;
void __iomem *vaddr;
void __iomem *base;
int ret;
uint32_t val;
struct mtd_info *mtd;
pdata = nand_davinci_get_pdata(pdev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
/* insist on board-specific configuration */
if (!pdata)
return -ENODEV;
/* which external chipselect will we be managing? */
if (pdata->core_chipsel > 3)
return -ENODEV;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
platform_set_drvdata(pdev, info);
res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res1 || !res2) {
dev_err(&pdev->dev, "resource missing\n");
return -EINVAL;
}
vaddr = devm_ioremap_resource(&pdev->dev, res1);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
/*
* This register range is used to set up the NAND settings. When the
* TI AEMIF driver is used, the same memory address range has already
* been requested by AEMIF, so we cannot request it twice; just ioremap it.
* The AEMIF and NAND drivers do not use the same registers in this range.
*/
base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
if (!base) {
dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
return -EADDRNOTAVAIL;
}
info->pdev = pdev;
info->base = base;
info->vaddr = vaddr;
mtd = nand_to_mtd(&info->chip);
mtd->dev.parent = &pdev->dev;
nand_set_flash_node(&info->chip, pdev->dev.of_node);
/* options such as NAND_BBT_USE_FLASH */
info->chip.bbt_options = pdata->bbt_options;
/* options such as 16-bit widths */
info->chip.options = pdata->options;
info->chip.bbt_td = pdata->bbt_td;
info->chip.bbt_md = pdata->bbt_md;
info->timing = pdata->timing;
info->current_cs = info->vaddr;
info->core_chipsel = pdata->core_chipsel;
info->mask_chipsel = pdata->mask_chipsel;
/* use nandboot-capable ALE/CLE masks by default */
info->mask_ale = pdata->mask_ale ? : MASK_ALE;
info->mask_cle = pdata->mask_cle ? : MASK_CLE;
spin_lock_irq(&davinci_nand_lock);
/* put CSxNAND into NAND mode */
val = davinci_nand_readl(info, NANDFCR_OFFSET);
val |= BIT(info->core_chipsel);
davinci_nand_writel(info, NANDFCR_OFFSET, val);
spin_unlock_irq(&davinci_nand_lock);
/* Scan to find existence of the device(s) */
nand_controller_init(&info->controller);
info->controller.ops = &davinci_nand_controller_ops;
info->chip.controller = &info->controller;
ret = nand_scan(&info->chip, pdata->mask_chipsel ? 2 : 1);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
return ret;
}
if (pdata->parts)
ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
else
ret = mtd_device_register(mtd, NULL, 0);
if (ret < 0)
goto err_cleanup_nand;
val = davinci_nand_readl(info, NRCSR_OFFSET);
dev_info(&pdev->dev, "controller rev. %d.%d\n",
(val >> 8) & 0xff, val & 0xff);
return 0;
err_cleanup_nand:
nand_cleanup(&info->chip);
return ret;
}
static void nand_davinci_remove(struct platform_device *pdev)
{
struct davinci_nand_info *info = platform_get_drvdata(pdev);
struct nand_chip *chip = &info->chip;
int ret;
spin_lock_irq(&davinci_nand_lock);
if (chip->ecc.placement == NAND_ECC_PLACEMENT_INTERLEAVED)
ecc4_busy = false;
spin_unlock_irq(&davinci_nand_lock);
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
}
static struct platform_driver nand_davinci_driver = {
.probe = nand_davinci_probe,
.remove_new = nand_davinci_remove,
.driver = {
.name = "davinci_nand",
.of_match_table = of_match_ptr(davinci_nand_of_match),
},
};
MODULE_ALIAS("platform:davinci_nand");
module_platform_driver(nand_davinci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("Davinci NAND flash driver");
| linux-master | drivers/mtd/nand/raw/davinci_nand.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Freescale Enhanced Local Bus Controller NAND driver
*
* Copyright © 2006-2007, 2010 Freescale Semiconductor
*
* Authors: Nick Spence <[email protected]>,
* Scott Wood <[email protected]>
* Jack Lan <[email protected]>
* Roy Zang <[email protected]>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
#include <asm/fsl_lbc.h>
#define MAX_BANKS 8
#define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */
#define FCM_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait for FCM */
/* mtd information per set */
struct fsl_elbc_mtd {
struct nand_chip chip;
struct fsl_lbc_ctrl *ctrl;
struct device *dev;
int bank; /* Chip select bank number */
u8 __iomem *vbase; /* Chip select base virtual address */
int page_size; /* NAND page size (0=512, 1=2048) */
unsigned int fmr; /* FCM Flash Mode Register value */
};
/* Freescale eLBC FCM controller information */
struct fsl_elbc_fcm_ctrl {
struct nand_controller controller;
struct fsl_elbc_mtd *chips[MAX_BANKS];
u8 __iomem *addr; /* Address of assigned FCM buffer */
unsigned int page; /* Last page written to / read from */
unsigned int read_bytes; /* Number of bytes read during command */
unsigned int column; /* Saved column from SEQIN */
unsigned int index; /* Pointer to next byte to 'read' */
unsigned int status; /* status read from LTESR after last op */
unsigned int mdr; /* UPM/FCM Data Register value */
unsigned int use_mdr; /* Non zero if the MDR is to be set */
unsigned int oob; /* Non zero if operating on OOB data */
unsigned int counter; /* counter for the initializations */
unsigned int max_bitflips; /* Saved during READ0 cmd */
};
/* These map to the positions used by the FCM hardware ECC generator */
static int fsl_elbc_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
if (section >= chip->ecc.steps)
return -ERANGE;
oobregion->offset = (16 * section) + 6;
if (priv->fmr & FMR_ECCM)
oobregion->offset += 2;
oobregion->length = chip->ecc.bytes;
return 0;
}
static int fsl_elbc_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
if (section > chip->ecc.steps)
return -ERANGE;
if (!section) {
oobregion->offset = 0;
if (mtd->writesize > 512)
oobregion->offset++;
oobregion->length = (priv->fmr & FMR_ECCM) ? 7 : 5;
} else {
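/*
* Free bytes of section n start right after section n-1's ECC bytes,
* which end 5 (ECCM = 1) or 7 (ECCM = 0) bytes before offset 16 * n.
*/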
oobregion->offset = (16 * section) -
((priv->fmr & FMR_ECCM) ? 5 : 7);
if (section < chip->ecc.steps)
oobregion->length = 13;
else
oobregion->length = mtd->oobsize - oobregion->offset;
}
return 0;
}
static const struct mtd_ooblayout_ops fsl_elbc_ooblayout_ops = {
.ecc = fsl_elbc_ooblayout_ecc,
.free = fsl_elbc_ooblayout_free,
};
/*
* The eLBC may use HW ECC, so the OOB offsets that the NAND core uses
* for the BBT can interfere with the ECC positions; that's why we
* implement our own descriptors.
* OOB {11, 5} works for both SP and LP chips, with ECCM = 1 and ECCM = 0.
*/
static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
static struct nand_bbt_descr bbt_main_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
NAND_BBT_2BIT | NAND_BBT_VERSION,
.offs = 11,
.len = 4,
.veroffs = 15,
.maxblocks = 4,
.pattern = bbt_pattern,
};
static struct nand_bbt_descr bbt_mirror_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
NAND_BBT_2BIT | NAND_BBT_VERSION,
.offs = 11,
.len = 4,
.veroffs = 15,
.maxblocks = 4,
.pattern = mirror_pattern,
};
/*=================================*/
/*
* Set up the FCM hardware block and page address fields, and the fcm
* structure addr field to point to the correct FCM buffer in memory
*/
static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
int buf_num;
elbc_fcm_ctrl->page = page_addr;
if (priv->page_size) {
/*
* large page size chip: FPAR[PI] saves the lowest 6 bits,
* FBAR[BLK] saves the other bits.
*/
out_be32(&lbc->fbar, page_addr >> 6);
out_be32(&lbc->fpar,
((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
(oob ? FPAR_LP_MS : 0) | column);
buf_num = (page_addr & 1) << 2;
} else {
/*
* small page size chip: FPAR[PI] saves the lowest 5 bits,
* FBAR[BLK] saves the other bits.
*/
out_be32(&lbc->fbar, page_addr >> 5);
out_be32(&lbc->fpar,
((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
(oob ? FPAR_SP_MS : 0) | column);
buf_num = page_addr & 7;
}
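/*
* Each FCM buffer is 1 KiB: a large page spans four buffers (buf_num
* 0 or 4), a small page fits in a single one (buf_num 0-7).
*/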
elbc_fcm_ctrl->addr = priv->vbase + buf_num * 1024;
elbc_fcm_ctrl->index = column;
/* for OOB data point to the second half of the buffer */
if (oob)
elbc_fcm_ctrl->index += priv->page_size ? 2048 : 512;
dev_vdbg(priv->dev, "set_addr: bank=%d, "
"elbc_fcm_ctrl->addr=0x%p (0x%p), "
"index %x, pes %d ps %d\n",
buf_num, elbc_fcm_ctrl->addr, priv->vbase,
elbc_fcm_ctrl->index,
chip->phys_erase_shift, chip->page_shift);
}
/*
* execute FCM command and wait for it to complete
*/
static int fsl_elbc_run_command(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
/* Setup the FMR[OP] to execute without write protection */
out_be32(&lbc->fmr, priv->fmr | 3);
if (elbc_fcm_ctrl->use_mdr)
out_be32(&lbc->mdr, elbc_fcm_ctrl->mdr);
dev_vdbg(priv->dev,
"fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n",
in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr));
dev_vdbg(priv->dev,
"fsl_elbc_run_command: fbar=%08x fpar=%08x "
"fbcr=%08x bank=%d\n",
in_be32(&lbc->fbar), in_be32(&lbc->fpar),
in_be32(&lbc->fbcr), priv->bank);
ctrl->irq_status = 0;
/* execute special operation */
out_be32(&lbc->lsor, priv->bank);
/* wait for FCM complete flag or timeout */
wait_event_timeout(ctrl->irq_wait, ctrl->irq_status,
FCM_TIMEOUT_MSECS * HZ/1000);
elbc_fcm_ctrl->status = ctrl->irq_status;
/* store mdr value in case it was needed */
if (elbc_fcm_ctrl->use_mdr)
elbc_fcm_ctrl->mdr = in_be32(&lbc->mdr);
elbc_fcm_ctrl->use_mdr = 0;
if (elbc_fcm_ctrl->status != LTESR_CC) {
dev_info(priv->dev,
"command failed: fir %x fcr %x status %x mdr %x\n",
in_be32(&lbc->fir), in_be32(&lbc->fcr),
elbc_fcm_ctrl->status, elbc_fcm_ctrl->mdr);
return -EIO;
}
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return 0;
elbc_fcm_ctrl->max_bitflips = 0;
if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
uint32_t lteccr = in_be32(&lbc->lteccr);
/*
* If the command was a full page read and the eLBC has the LTECCR
* register, then bits 12-15 (PPC order) of LTECCR indicate which
* 512-byte sub-pages had corrected errors; bits 28-31 flag
* uncorrectable errors, which are handled elsewhere. For small-page
* NAND only one bit is used. If the eLBC doesn't have the LTECCR
* register, it reads as 0.
* FIXME: 4 bits can be corrected on NANDs with 2k pages, so
* count the number of sub-pages with bitflips and update
* ecc_stats.corrected accordingly.
*/
if (lteccr & 0x000F000F)
out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
if (lteccr & 0x000F0000) {
mtd->ecc_stats.corrected++;
elbc_fcm_ctrl->max_bitflips = 1;
}
}
return 0;
}
static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
{
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
if (priv->page_size) {
out_be32(&lbc->fir,
(FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_CA << FIR_OP1_SHIFT) |
(FIR_OP_PA << FIR_OP2_SHIFT) |
(FIR_OP_CM1 << FIR_OP3_SHIFT) |
(FIR_OP_RBW << FIR_OP4_SHIFT));
out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
(NAND_CMD_READSTART << FCR_CMD1_SHIFT));
} else {
out_be32(&lbc->fir,
(FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_CA << FIR_OP1_SHIFT) |
(FIR_OP_PA << FIR_OP2_SHIFT) |
(FIR_OP_RBW << FIR_OP3_SHIFT));
if (oob)
out_be32(&lbc->fcr, NAND_CMD_READOOB << FCR_CMD0_SHIFT);
else
out_be32(&lbc->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT);
}
}
/* cmdfunc send commands to the FCM */
static void fsl_elbc_cmdfunc(struct nand_chip *chip, unsigned int command,
int column, int page_addr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
elbc_fcm_ctrl->use_mdr = 0;
/* clear the read buffer */
elbc_fcm_ctrl->read_bytes = 0;
if (command != NAND_CMD_PAGEPROG)
elbc_fcm_ctrl->index = 0;
switch (command) {
/* READ0 and READ1 read the entire buffer to use hardware ECC. */
case NAND_CMD_READ1:
column += 256;
fallthrough;
case NAND_CMD_READ0:
dev_dbg(priv->dev,
"fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
" 0x%x, column: 0x%x.\n", page_addr, column);
out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */
set_addr(mtd, 0, page_addr, 0);
elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
elbc_fcm_ctrl->index += column;
fsl_elbc_do_read(chip, 0);
fsl_elbc_run_command(mtd);
return;
/* RNDOUT moves the pointer inside the page */
case NAND_CMD_RNDOUT:
dev_dbg(priv->dev,
"fsl_elbc_cmdfunc: NAND_CMD_RNDOUT, column: 0x%x.\n",
column);
elbc_fcm_ctrl->index = column;
return;
/* READOOB reads only the OOB because no ECC is performed. */
case NAND_CMD_READOOB:
dev_vdbg(priv->dev,
"fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:"
" 0x%x, column: 0x%x.\n", page_addr, column);
out_be32(&lbc->fbcr, mtd->oobsize - column);
set_addr(mtd, column, page_addr, 1);
elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
fsl_elbc_do_read(chip, 1);
fsl_elbc_run_command(mtd);
return;
case NAND_CMD_READID:
case NAND_CMD_PARAM:
dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD %x\n", command);
out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_UA << FIR_OP1_SHIFT) |
(FIR_OP_RBW << FIR_OP2_SHIFT));
out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT);
/*
* although READID currently only needs 8 bytes, we always read
* the maximum of 256 bytes (needed for PARAM)
*/
out_be32(&lbc->fbcr, 256);
elbc_fcm_ctrl->read_bytes = 256;
elbc_fcm_ctrl->use_mdr = 1;
elbc_fcm_ctrl->mdr = column;
set_addr(mtd, 0, 0, 0);
fsl_elbc_run_command(mtd);
return;
/* ERASE1 stores the block and page address */
case NAND_CMD_ERASE1:
dev_vdbg(priv->dev,
"fsl_elbc_cmdfunc: NAND_CMD_ERASE1, "
"page_addr: 0x%x.\n", page_addr);
set_addr(mtd, 0, page_addr, 0);
return;
/* ERASE2 uses the block and page address from ERASE1 */
case NAND_CMD_ERASE2:
dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
out_be32(&lbc->fir,
(FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_PA << FIR_OP1_SHIFT) |
(FIR_OP_CM2 << FIR_OP2_SHIFT) |
(FIR_OP_CW1 << FIR_OP3_SHIFT) |
(FIR_OP_RS << FIR_OP4_SHIFT));
out_be32(&lbc->fcr,
(NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) |
(NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
(NAND_CMD_ERASE2 << FCR_CMD2_SHIFT));
out_be32(&lbc->fbcr, 0);
elbc_fcm_ctrl->read_bytes = 0;
elbc_fcm_ctrl->use_mdr = 1;
fsl_elbc_run_command(mtd);
return;
/* SEQIN sets up the addr buffer and all registers except the length */
case NAND_CMD_SEQIN: {
__be32 fcr;
dev_vdbg(priv->dev,
"fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, "
"page_addr: 0x%x, column: 0x%x.\n",
page_addr, column);
elbc_fcm_ctrl->column = column;
elbc_fcm_ctrl->use_mdr = 1;
if (column >= mtd->writesize) {
/* OOB area */
column -= mtd->writesize;
elbc_fcm_ctrl->oob = 1;
} else {
WARN_ON(column != 0);
elbc_fcm_ctrl->oob = 0;
}
fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
(NAND_CMD_SEQIN << FCR_CMD2_SHIFT) |
(NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
if (priv->page_size) {
out_be32(&lbc->fir,
(FIR_OP_CM2 << FIR_OP0_SHIFT) |
(FIR_OP_CA << FIR_OP1_SHIFT) |
(FIR_OP_PA << FIR_OP2_SHIFT) |
(FIR_OP_WB << FIR_OP3_SHIFT) |
(FIR_OP_CM3 << FIR_OP4_SHIFT) |
(FIR_OP_CW1 << FIR_OP5_SHIFT) |
(FIR_OP_RS << FIR_OP6_SHIFT));
} else {
out_be32(&lbc->fir,
(FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_CM2 << FIR_OP1_SHIFT) |
(FIR_OP_CA << FIR_OP2_SHIFT) |
(FIR_OP_PA << FIR_OP3_SHIFT) |
(FIR_OP_WB << FIR_OP4_SHIFT) |
(FIR_OP_CM3 << FIR_OP5_SHIFT) |
(FIR_OP_CW1 << FIR_OP6_SHIFT) |
(FIR_OP_RS << FIR_OP7_SHIFT));
if (elbc_fcm_ctrl->oob)
/* OOB area --> READOOB */
fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
else
/* First 256 bytes --> READ0 */
fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
}
out_be32(&lbc->fcr, fcr);
set_addr(mtd, column, page_addr, elbc_fcm_ctrl->oob);
return;
}
/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
case NAND_CMD_PAGEPROG: {
dev_vdbg(priv->dev,
"fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
"writing %d bytes.\n", elbc_fcm_ctrl->index);
/* if the write did not start at 0 or is not a full page
* then set the exact length, otherwise use a full page
* write so the HW generates the ECC.
*/
if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize)
out_be32(&lbc->fbcr,
elbc_fcm_ctrl->index - elbc_fcm_ctrl->column);
else
out_be32(&lbc->fbcr, 0);
fsl_elbc_run_command(mtd);
return;
}
/* CMD_STATUS must read the status byte while CEB is active */
/* Note - it does not wait for the ready line */
case NAND_CMD_STATUS:
out_be32(&lbc->fir,
(FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_RBW << FIR_OP1_SHIFT));
out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
out_be32(&lbc->fbcr, 1);
set_addr(mtd, 0, 0, 0);
elbc_fcm_ctrl->read_bytes = 1;
fsl_elbc_run_command(mtd);
/* The chip always seems to report that it is
* write-protected, even when it is not.
*/
setbits8(elbc_fcm_ctrl->addr, NAND_STATUS_WP);
return;
/* RESET without waiting for the ready line */
case NAND_CMD_RESET:
dev_dbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_RESET.\n");
out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT);
out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT);
fsl_elbc_run_command(mtd);
return;
default:
dev_err(priv->dev,
"fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n",
command);
}
}
static void fsl_elbc_select_chip(struct nand_chip *chip, int cs)
{
/* The hardware does not seem to support multiple
* chips per bank.
*/
}
/*
* Write buf to the FCM Controller Data Buffer
*/
static void fsl_elbc_write_buf(struct nand_chip *chip, const u8 *buf, int len)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
unsigned int bufsize = mtd->writesize + mtd->oobsize;
if (len <= 0) {
dev_err(priv->dev, "write_buf of %d bytes", len);
elbc_fcm_ctrl->status = 0;
return;
}
if ((unsigned int)len > bufsize - elbc_fcm_ctrl->index) {
dev_err(priv->dev,
"write_buf beyond end of buffer "
"(%d requested, %u available)\n",
len, bufsize - elbc_fcm_ctrl->index);
len = bufsize - elbc_fcm_ctrl->index;
}
memcpy_toio(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], buf, len);
/*
* This is a workaround for the weird eLBC hangs during NAND writes.
* Scott Wood says: "...perhaps difference in how long it takes a
* write to make it through the localbus compared to a write to IMMR
* is causing problems, and sync isn't helping for some reason."
* Reading back the last byte helps though.
*/
in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index] + len - 1);
elbc_fcm_ctrl->index += len;
}
/*
* Read the next byte from the FCM hardware buffer if it has any data
* left; otherwise log an error and return ERR_BYTE.
*/
static u8 fsl_elbc_read_byte(struct nand_chip *chip)
{
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
/* If there are still bytes in the FCM, then use the next byte. */
if (elbc_fcm_ctrl->index < elbc_fcm_ctrl->read_bytes)
return in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index++]);
dev_err(priv->dev, "read_byte beyond end of buffer\n");
return ERR_BYTE;
}
/*
* Read from the FCM Controller Data Buffer
*/
static void fsl_elbc_read_buf(struct nand_chip *chip, u8 *buf, int len)
{
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
int avail;
if (len < 0)
return;
avail = min((unsigned int)len,
elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index);
memcpy_fromio(buf, &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], avail);
elbc_fcm_ctrl->index += avail;
if (len > avail)
dev_err(priv->dev,
"read_buf beyond end of buffer "
"(%d requested, %d available)\n",
len, avail);
}
/* This function is called after Program and Erase Operations to
* check for success or failure.
*/
static int fsl_elbc_wait(struct nand_chip *chip)
{
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
if (elbc_fcm_ctrl->status != LTESR_CC)
return NAND_STATUS_FAIL;
/* The chip always seems to report that it is
* write-protected, even when it is not.
*/
return (elbc_fcm_ctrl->mdr & 0xff) | NAND_STATUS_WP;
}
static int fsl_elbc_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
fsl_elbc_read_buf(chip, chip->oob_poi, mtd->oobsize);
if (fsl_elbc_wait(chip) & NAND_STATUS_FAIL)
mtd->ecc_stats.failed++;
return elbc_fcm_ctrl->max_bitflips;
}
/* ECC will be calculated automatically, and errors will be detected in
* waitfunc.
*/
static int fsl_elbc_write_page(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
fsl_elbc_write_buf(chip, chip->oob_poi, mtd->oobsize);
return nand_prog_page_end_op(chip);
}
/* ECC will be calculated automatically, and errors will be detected in
* waitfunc.
*/
static int fsl_elbc_write_subpage(struct nand_chip *chip, uint32_t offset,
uint32_t data_len, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
fsl_elbc_write_buf(chip, buf, mtd->writesize);
fsl_elbc_write_buf(chip, chip->oob_poi, mtd->oobsize);
return nand_prog_page_end_op(chip);
}
static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
{
struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
struct nand_chip *chip = &priv->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
dev_dbg(priv->dev, "eLBC Set Information for bank %d\n", priv->bank);
/* Fill in fsl_elbc_mtd structure */
mtd->dev.parent = priv->dev;
nand_set_flash_node(chip, priv->dev->of_node);
/* set timeout to maximum */
priv->fmr = 15 << FMR_CWTO_SHIFT;
if (in_be32(&lbc->bank[priv->bank].or) & OR_FCM_PGS)
priv->fmr |= FMR_ECCM;
/* fill in nand_chip structure */
/* set up function call table */
chip->legacy.read_byte = fsl_elbc_read_byte;
chip->legacy.write_buf = fsl_elbc_write_buf;
chip->legacy.read_buf = fsl_elbc_read_buf;
chip->legacy.select_chip = fsl_elbc_select_chip;
chip->legacy.cmdfunc = fsl_elbc_cmdfunc;
chip->legacy.waitfunc = fsl_elbc_wait;
chip->legacy.set_features = nand_get_set_features_notsupp;
chip->legacy.get_features = nand_get_set_features_notsupp;
chip->bbt_td = &bbt_main_descr;
chip->bbt_md = &bbt_mirror_descr;
/* set up nand options */
chip->bbt_options = NAND_BBT_USE_FLASH;
chip->controller = &elbc_fcm_ctrl->controller;
nand_set_controller_data(chip, priv);
return 0;
}
static int fsl_elbc_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
unsigned int al;
u32 br;
/*
* if ECC was not chosen in DT, decide whether to use HW or SW ECC from
* CS Base Register
*/
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID) {
/* If CS Base Register selects full hardware ECC then use it */
if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
BR_DECC_CHK_GEN) {
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
} else {
/* otherwise fall back to default software ECC */
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
}
}
switch (chip->ecc.engine_type) {
/* if HW ECC was chosen, setup ecc and oob layout */
case NAND_ECC_ENGINE_TYPE_ON_HOST:
chip->ecc.read_page = fsl_elbc_read_page;
chip->ecc.write_page = fsl_elbc_write_page;
chip->ecc.write_subpage = fsl_elbc_write_subpage;
mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
chip->ecc.size = 512;
chip->ecc.bytes = 3;
chip->ecc.strength = 1;
break;
/* if none or SW ECC was chosen, we do not need to set anything here */
case NAND_ECC_ENGINE_TYPE_NONE:
case NAND_ECC_ENGINE_TYPE_SOFT:
case NAND_ECC_ENGINE_TYPE_ON_DIE:
break;
default:
return -EINVAL;
}
/* enable/disable HW ECC checking and generating based on if HW ECC was chosen */
br = in_be32(&lbc->bank[priv->bank].br) & ~BR_DECC;
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
out_be32(&lbc->bank[priv->bank].br, br | BR_DECC_CHK_GEN);
else
out_be32(&lbc->bank[priv->bank].br, br | BR_DECC_OFF);
/* calculate FMR Address Length field */
al = 0;
if (chip->pagemask & 0xffff0000)
al++;
if (chip->pagemask & 0xff000000)
al++;
priv->fmr |= al << FMR_AL_SHIFT;
dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
nanddev_ntargets(&chip->base));
dev_dbg(priv->dev, "fsl_elbc_init: nand->chipsize = %lld\n",
nanddev_target_size(&chip->base));
dev_dbg(priv->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
chip->pagemask);
dev_dbg(priv->dev, "fsl_elbc_init: nand->legacy.chip_delay = %d\n",
chip->legacy.chip_delay);
dev_dbg(priv->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
chip->badblockpos);
dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
chip->chip_shift);
dev_dbg(priv->dev, "fsl_elbc_init: nand->page_shift = %d\n",
chip->page_shift);
dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
chip->phys_erase_shift);
dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.engine_type = %d\n",
chip->ecc.engine_type);
dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
chip->ecc.steps);
dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
chip->ecc.bytes);
dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
chip->ecc.total);
dev_dbg(priv->dev, "fsl_elbc_init: mtd->ooblayout = %p\n",
mtd->ooblayout);
dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size);
dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
mtd->erasesize);
dev_dbg(priv->dev, "fsl_elbc_init: mtd->writesize = %d\n",
mtd->writesize);
dev_dbg(priv->dev, "fsl_elbc_init: mtd->oobsize = %d\n",
mtd->oobsize);
/* adjust Option Register and ECC to match Flash page size */
if (mtd->writesize == 512) {
priv->page_size = 0;
clrbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
} else if (mtd->writesize == 2048) {
priv->page_size = 1;
setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
} else {
dev_err(priv->dev,
"fsl_elbc_init: page size %d is not supported\n",
mtd->writesize);
return -ENOTSUPP;
}
return 0;
}
static const struct nand_controller_ops fsl_elbc_controller_ops = {
.attach_chip = fsl_elbc_attach_chip,
};
static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
{
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
struct mtd_info *mtd = nand_to_mtd(&priv->chip);
kfree(mtd->name);
if (priv->vbase)
iounmap(priv->vbase);
elbc_fcm_ctrl->chips[priv->bank] = NULL;
kfree(priv);
return 0;
}
static DEFINE_MUTEX(fsl_elbc_nand_mutex);
static int fsl_elbc_nand_probe(struct platform_device *pdev)
{
struct fsl_lbc_regs __iomem *lbc;
struct fsl_elbc_mtd *priv;
struct resource res;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
static const char *part_probe_types[]
= { "cmdlinepart", "RedBoot", "ofpart", NULL };
int ret;
int bank;
struct device *dev;
struct device_node *node = pdev->dev.of_node;
struct mtd_info *mtd;
if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
return -ENODEV;
lbc = fsl_lbc_ctrl_dev->regs;
dev = fsl_lbc_ctrl_dev->dev;
/* get, allocate and map the memory resource */
ret = of_address_to_resource(node, 0, &res);
if (ret) {
dev_err(dev, "failed to get resource\n");
return ret;
}
/* find which chip select it is connected to */
for (bank = 0; bank < MAX_BANKS; bank++)
if ((in_be32(&lbc->bank[bank].br) & BR_V) &&
(in_be32(&lbc->bank[bank].br) & BR_MSEL) == BR_MS_FCM &&
(in_be32(&lbc->bank[bank].br) &
in_be32(&lbc->bank[bank].or) & BR_BA)
== fsl_lbc_addr(res.start))
break;
if (bank >= MAX_BANKS) {
dev_err(dev, "address did not match any chip selects\n");
return -ENODEV;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
mutex_lock(&fsl_elbc_nand_mutex);
if (!fsl_lbc_ctrl_dev->nand) {
elbc_fcm_ctrl = kzalloc(sizeof(*elbc_fcm_ctrl), GFP_KERNEL);
if (!elbc_fcm_ctrl) {
mutex_unlock(&fsl_elbc_nand_mutex);
ret = -ENOMEM;
goto err;
}
elbc_fcm_ctrl->counter++;
nand_controller_init(&elbc_fcm_ctrl->controller);
fsl_lbc_ctrl_dev->nand = elbc_fcm_ctrl;
} else {
elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
}
mutex_unlock(&fsl_elbc_nand_mutex);
elbc_fcm_ctrl->chips[bank] = priv;
priv->bank = bank;
priv->ctrl = fsl_lbc_ctrl_dev;
priv->dev = &pdev->dev;
dev_set_drvdata(priv->dev, priv);
priv->vbase = ioremap(res.start, resource_size(&res));
if (!priv->vbase) {
dev_err(dev, "failed to map chip region\n");
ret = -ENOMEM;
goto err;
}
mtd = nand_to_mtd(&priv->chip);
mtd->name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
if (!mtd->name) {
ret = -ENOMEM;
goto err;
}
ret = fsl_elbc_chip_init(priv);
if (ret)
goto err;
priv->chip.controller->ops = &fsl_elbc_controller_ops;
ret = nand_scan(&priv->chip, 1);
if (ret)
goto err;
/* First look for a RedBoot table or partitions on the command
* line; these take precedence over device-tree information */
ret = mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
if (ret)
goto cleanup_nand;
pr_info("eLBC NAND device at 0x%llx, bank %d\n",
(unsigned long long)res.start, priv->bank);
return 0;
cleanup_nand:
nand_cleanup(&priv->chip);
err:
fsl_elbc_chip_remove(priv);
return ret;
}
static void fsl_elbc_nand_remove(struct platform_device *pdev)
{
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev);
struct nand_chip *chip = &priv->chip;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
fsl_elbc_chip_remove(priv);
mutex_lock(&fsl_elbc_nand_mutex);
elbc_fcm_ctrl->counter--;
if (!elbc_fcm_ctrl->counter) {
fsl_lbc_ctrl_dev->nand = NULL;
kfree(elbc_fcm_ctrl);
}
mutex_unlock(&fsl_elbc_nand_mutex);
}
static const struct of_device_id fsl_elbc_nand_match[] = {
{ .compatible = "fsl,elbc-fcm-nand", },
{}
};
MODULE_DEVICE_TABLE(of, fsl_elbc_nand_match);
static struct platform_driver fsl_elbc_nand_driver = {
.driver = {
.name = "fsl,elbc-fcm-nand",
.of_match_table = fsl_elbc_nand_match,
},
.probe = fsl_elbc_nand_probe,
.remove_new = fsl_elbc_nand_remove,
};
module_platform_driver(fsl_elbc_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Freescale");
MODULE_DESCRIPTION("Freescale Enhanced Local Bus Controller MTD NAND driver");
| linux-master | drivers/mtd/nand/raw/fsl_elbc_nand.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2009-2015 Freescale Semiconductor, Inc. and others
*
* Description: MPC5125, VF610, MCF54418 and Kinetis K70 NAND driver.
* Jason ported to M54418TWR and MVFA5 (VF610).
* Authors: Stefan Agner <[email protected]>
* Bill Pringlemeir <[email protected]>
* Shaohui Xie <[email protected]>
* Jason Jin <[email protected]>
*
* Based on original driver mpc5121_nfc.c.
*
* Limitations:
* - Untested on MPC5125 and M54418.
* - DMA and pipelining not used.
* - 2K pages or less.
* - HW ECC: Only 2K page with 64+ OOB.
* - HW ECC: Only 24 and 32-bit error correction implemented.
*/
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/swab.h>
#define DRV_NAME "vf610_nfc"
/* Register Offsets */
#define NFC_FLASH_CMD1 0x3F00
#define NFC_FLASH_CMD2 0x3F04
#define NFC_COL_ADDR 0x3F08
#define NFC_ROW_ADDR 0x3F0c
#define NFC_ROW_ADDR_INC 0x3F14
#define NFC_FLASH_STATUS1 0x3F18
#define NFC_FLASH_STATUS2 0x3F1c
#define NFC_CACHE_SWAP 0x3F28
#define NFC_SECTOR_SIZE 0x3F2c
#define NFC_FLASH_CONFIG 0x3F30
#define NFC_IRQ_STATUS 0x3F38
/* Addresses for NFC MAIN RAM BUFFER areas */
#define NFC_MAIN_AREA(n) ((n) * 0x1000)
#define PAGE_2K 0x0800
#define OOB_64 0x0040
#define OOB_MAX 0x0100
/* NFC_CMD2[CODE] controller cycle bit masks */
#define COMMAND_CMD_BYTE1 BIT(14)
#define COMMAND_CAR_BYTE1 BIT(13)
#define COMMAND_CAR_BYTE2 BIT(12)
#define COMMAND_RAR_BYTE1 BIT(11)
#define COMMAND_RAR_BYTE2 BIT(10)
#define COMMAND_RAR_BYTE3 BIT(9)
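/* Sets the 'x' topmost address-cycle bits (CAR_BYTE1 downwards) */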
#define COMMAND_NADDR_BYTES(x) GENMASK(13, 13 - (x) + 1)
#define COMMAND_WRITE_DATA BIT(8)
#define COMMAND_CMD_BYTE2 BIT(7)
#define COMMAND_RB_HANDSHAKE BIT(6)
#define COMMAND_READ_DATA BIT(5)
#define COMMAND_CMD_BYTE3 BIT(4)
#define COMMAND_READ_STATUS BIT(3)
#define COMMAND_READ_ID BIT(2)
/* NFC ECC mode define */
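/*
* As used below: ECC_45_BYTE pairs with 24-bit correction (45 ECC bytes),
* ECC_60_BYTE with 32-bit correction (60 ECC bytes), ECC_BYPASS disables
* the ECC engine.
*/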
#define ECC_BYPASS 0
#define ECC_45_BYTE 6
#define ECC_60_BYTE 7
/*** Register Mask and bit definitions */
/* NFC_FLASH_CMD1 Field */
#define CMD_BYTE2_MASK 0xFF000000
#define CMD_BYTE2_SHIFT 24
/* NFC_FLASH_CM2 Field */
#define CMD_BYTE1_MASK 0xFF000000
#define CMD_BYTE1_SHIFT 24
#define CMD_CODE_MASK 0x00FFFF00
#define CMD_CODE_SHIFT 8
#define BUFNO_MASK 0x00000006
#define BUFNO_SHIFT 1
#define START_BIT BIT(0)
/* NFC_COL_ADDR Field */
#define COL_ADDR_MASK 0x0000FFFF
#define COL_ADDR_SHIFT 0
#define COL_ADDR(pos, val) (((val) & 0xFF) << (8 * (pos)))
/* NFC_ROW_ADDR Field */
#define ROW_ADDR_MASK 0x00FFFFFF
#define ROW_ADDR_SHIFT 0
#define ROW_ADDR(pos, val) (((val) & 0xFF) << (8 * (pos)))
#define ROW_ADDR_CHIP_SEL_RB_MASK 0xF0000000
#define ROW_ADDR_CHIP_SEL_RB_SHIFT 28
#define ROW_ADDR_CHIP_SEL_MASK 0x0F000000
#define ROW_ADDR_CHIP_SEL_SHIFT 24
/* NFC_FLASH_STATUS2 Field */
#define STATUS_BYTE1_MASK 0x000000FF
/* NFC_FLASH_CONFIG Field */
#define CONFIG_ECC_SRAM_ADDR_MASK 0x7FC00000
#define CONFIG_ECC_SRAM_ADDR_SHIFT 22
#define CONFIG_ECC_SRAM_REQ_BIT BIT(21)
#define CONFIG_DMA_REQ_BIT BIT(20)
#define CONFIG_ECC_MODE_MASK 0x000E0000
#define CONFIG_ECC_MODE_SHIFT 17
#define CONFIG_FAST_FLASH_BIT BIT(16)
#define CONFIG_16BIT BIT(7)
#define CONFIG_BOOT_MODE_BIT BIT(6)
#define CONFIG_ADDR_AUTO_INCR_BIT BIT(5)
#define CONFIG_BUFNO_AUTO_INCR_BIT BIT(4)
#define CONFIG_PAGE_CNT_MASK 0xF
#define CONFIG_PAGE_CNT_SHIFT 0
/* NFC_IRQ_STATUS Field */
#define IDLE_IRQ_BIT BIT(29)
#define IDLE_EN_BIT BIT(20)
#define CMD_DONE_CLEAR_BIT BIT(18)
#define IDLE_CLEAR_BIT BIT(17)
/*
* ECC status - seems to consume 8 bytes (double word). The documented
* status byte is located in the lowest byte of the second word (which is
* the 4th or 7th byte depending on endianness).
* Calculate an offset to store the ECC status at the end of the buffer.
*/
#define ECC_SRAM_ADDR (PAGE_2K + OOB_MAX - 8)
#define ECC_STATUS 0x4
#define ECC_STATUS_MASK 0x80
#define ECC_STATUS_ERR_COUNT 0x3F
enum vf610_nfc_variant {
NFC_VFC610 = 1,
};
struct vf610_nfc {
struct nand_controller base;
struct nand_chip chip;
struct device *dev;
void __iomem *regs;
struct completion cmd_done;
/* Status and ID are in alternate locations. */
enum vf610_nfc_variant variant;
struct clk *clk;
/*
* Indicates that user data (full page/OOB) is being accessed. This
* tells the driver whether to swap byte endianness.
* See comments in vf610_nfc_rd_from_sram/vf610_nfc_wr_to_sram.
*/
bool data_access;
u32 ecc_mode;
};
static inline struct vf610_nfc *chip_to_nfc(struct nand_chip *chip)
{
return container_of(chip, struct vf610_nfc, chip);
}
static inline u32 vf610_nfc_read(struct vf610_nfc *nfc, uint reg)
{
return readl(nfc->regs + reg);
}
static inline void vf610_nfc_write(struct vf610_nfc *nfc, uint reg, u32 val)
{
writel(val, nfc->regs + reg);
}
static inline void vf610_nfc_set(struct vf610_nfc *nfc, uint reg, u32 bits)
{
vf610_nfc_write(nfc, reg, vf610_nfc_read(nfc, reg) | bits);
}
static inline void vf610_nfc_clear(struct vf610_nfc *nfc, uint reg, u32 bits)
{
vf610_nfc_write(nfc, reg, vf610_nfc_read(nfc, reg) & ~bits);
}
static inline void vf610_nfc_set_field(struct vf610_nfc *nfc, u32 reg,
u32 mask, u32 shift, u32 val)
{
vf610_nfc_write(nfc, reg,
(vf610_nfc_read(nfc, reg) & (~mask)) | val << shift);
}
static inline bool vf610_nfc_kernel_is_little_endian(void)
{
#ifdef __LITTLE_ENDIAN
return true;
#else
return false;
#endif
}
/*
* Read accessor for internal SRAM buffer
* @dst: destination address in regular memory
* @src: source address in SRAM buffer
* @len: bytes to copy
* @fix_endian: Fix endianness if required
*
* Use this accessor for the internal SRAM buffers. On the ARM
* Freescale Vybrid SoC it's known that the driver can treat
* the SRAM buffer as if it were regular memory. Other platforms might
* need to treat the buffers differently.
*
* The controller stores bytes from the NAND chip internally in big
* endianness. On little endian platforms such as Vybrid this leads
* to reversed byte order.
* For performance reasons (and earlier probably due to unawareness),
* the driver avoids correcting endianness where it controls both the
* write and read side (e.g. page-wise data access).
*/
static inline void vf610_nfc_rd_from_sram(void *dst, const void __iomem *src,
size_t len, bool fix_endian)
{
if (vf610_nfc_kernel_is_little_endian() && fix_endian) {
unsigned int i;
for (i = 0; i < len; i += 4) {
u32 val = swab32(__raw_readl(src + i));
memcpy(dst + i, &val, min(sizeof(val), len - i));
}
} else {
memcpy_fromio(dst, src, len);
}
}
/*
* Write accessor for internal SRAM buffer
* @dst: destination address in SRAM buffer
* @src: source address in regular memory
* @len: bytes to copy
* @fix_endian: Fix endianness if required
*
* Use this accessor for the internal SRAM buffers. On the ARM
* Freescale Vybrid SoC it's known that the driver can treat
* the SRAM buffer as if it were regular memory. Other platforms might
* need to treat the buffers differently.
*
* The controller stores bytes from the NAND chip internally in big
* endianness. On little endian platforms such as Vybrid this leads
* to reversed byte order.
* For performance reasons (and earlier probably due to unawareness),
* the driver avoids correcting endianness where it controls both the
* write and read side (e.g. page-wise data access).
*/
static inline void vf610_nfc_wr_to_sram(void __iomem *dst, const void *src,
size_t len, bool fix_endian)
{
if (vf610_nfc_kernel_is_little_endian() && fix_endian) {
unsigned int i;
for (i = 0; i < len; i += 4) {
u32 val;
memcpy(&val, src + i, min(sizeof(val), len - i));
__raw_writel(swab32(val), dst + i);
}
} else {
memcpy_toio(dst, src, len);
}
}
/* Clear flags for upcoming command */
static inline void vf610_nfc_clear_status(struct vf610_nfc *nfc)
{
u32 tmp = vf610_nfc_read(nfc, NFC_IRQ_STATUS);
tmp |= CMD_DONE_CLEAR_BIT | IDLE_CLEAR_BIT;
vf610_nfc_write(nfc, NFC_IRQ_STATUS, tmp);
}
static void vf610_nfc_done(struct vf610_nfc *nfc)
{
unsigned long timeout = msecs_to_jiffies(100);
/*
* A barrier is needed after this write: the write must complete
* before the next register is read for the first time.
* vf610_nfc_set() implies such a barrier by using writel() to
* write to the register.
*/
vf610_nfc_set(nfc, NFC_IRQ_STATUS, IDLE_EN_BIT);
vf610_nfc_set(nfc, NFC_FLASH_CMD2, START_BIT);
if (!wait_for_completion_timeout(&nfc->cmd_done, timeout))
dev_warn(nfc->dev, "Timeout while waiting for BUSY.\n");
vf610_nfc_clear_status(nfc);
}
static irqreturn_t vf610_nfc_irq(int irq, void *data)
{
struct vf610_nfc *nfc = data;
vf610_nfc_clear(nfc, NFC_IRQ_STATUS, IDLE_EN_BIT);
complete(&nfc->cmd_done);
return IRQ_HANDLED;
}
static inline void vf610_nfc_ecc_mode(struct vf610_nfc *nfc, int ecc_mode)
{
vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG,
CONFIG_ECC_MODE_MASK,
CONFIG_ECC_MODE_SHIFT, ecc_mode);
}
static inline void vf610_nfc_run(struct vf610_nfc *nfc, u32 col, u32 row,
u32 cmd1, u32 cmd2, u32 trfr_sz)
{
vf610_nfc_set_field(nfc, NFC_COL_ADDR, COL_ADDR_MASK,
COL_ADDR_SHIFT, col);
vf610_nfc_set_field(nfc, NFC_ROW_ADDR, ROW_ADDR_MASK,
ROW_ADDR_SHIFT, row);
vf610_nfc_write(nfc, NFC_SECTOR_SIZE, trfr_sz);
vf610_nfc_write(nfc, NFC_FLASH_CMD1, cmd1);
vf610_nfc_write(nfc, NFC_FLASH_CMD2, cmd2);
dev_dbg(nfc->dev,
"col 0x%04x, row 0x%08x, cmd1 0x%08x, cmd2 0x%08x, len %d\n",
col, row, cmd1, cmd2, trfr_sz);
vf610_nfc_done(nfc);
}
static inline const struct nand_op_instr *
vf610_get_next_instr(const struct nand_subop *subop, int *op_id)
{
if (*op_id + 1 >= subop->ninstrs)
return NULL;
(*op_id)++;
return &subop->instrs[*op_id];
}
static int vf610_nfc_cmd(struct nand_chip *chip,
const struct nand_subop *subop)
{
const struct nand_op_instr *instr;
struct vf610_nfc *nfc = chip_to_nfc(chip);
int op_id = -1, trfr_sz = 0, offset = 0;
u32 col = 0, row = 0, cmd1 = 0, cmd2 = 0, code = 0;
bool force8bit = false;
/*
* Some ops are optional, but the hardware requires the operations
* to be in this exact order.
* The op parser enforces the order and makes sure that there isn't
* a read and write element in a single operation.
*/
instr = vf610_get_next_instr(subop, &op_id);
if (!instr)
return -EINVAL;
if (instr && instr->type == NAND_OP_CMD_INSTR) {
cmd2 |= instr->ctx.cmd.opcode << CMD_BYTE1_SHIFT;
code |= COMMAND_CMD_BYTE1;
instr = vf610_get_next_instr(subop, &op_id);
}
if (instr && instr->type == NAND_OP_ADDR_INSTR) {
int naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
int i = nand_subop_get_addr_start_off(subop, op_id);
for (; i < naddrs; i++) {
u8 val = instr->ctx.addr.addrs[i];
if (i < 2)
col |= COL_ADDR(i, val);
else
row |= ROW_ADDR(i - 2, val);
}
code |= COMMAND_NADDR_BYTES(naddrs);
instr = vf610_get_next_instr(subop, &op_id);
}
if (instr && instr->type == NAND_OP_DATA_OUT_INSTR) {
trfr_sz = nand_subop_get_data_len(subop, op_id);
offset = nand_subop_get_data_start_off(subop, op_id);
force8bit = instr->ctx.data.force_8bit;
/*
* Don't fix endianness on page access for historical reasons.
* See comment in vf610_nfc_wr_to_sram
*/
vf610_nfc_wr_to_sram(nfc->regs + NFC_MAIN_AREA(0) + offset,
instr->ctx.data.buf.out + offset,
trfr_sz, !nfc->data_access);
code |= COMMAND_WRITE_DATA;
instr = vf610_get_next_instr(subop, &op_id);
}
if (instr && instr->type == NAND_OP_CMD_INSTR) {
cmd1 |= instr->ctx.cmd.opcode << CMD_BYTE2_SHIFT;
code |= COMMAND_CMD_BYTE2;
instr = vf610_get_next_instr(subop, &op_id);
}
if (instr && instr->type == NAND_OP_WAITRDY_INSTR) {
code |= COMMAND_RB_HANDSHAKE;
instr = vf610_get_next_instr(subop, &op_id);
}
if (instr && instr->type == NAND_OP_DATA_IN_INSTR) {
trfr_sz = nand_subop_get_data_len(subop, op_id);
offset = nand_subop_get_data_start_off(subop, op_id);
force8bit = instr->ctx.data.force_8bit;
code |= COMMAND_READ_DATA;
}
if (force8bit && (chip->options & NAND_BUSWIDTH_16))
vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
cmd2 |= code << CMD_CODE_SHIFT;
vf610_nfc_run(nfc, col, row, cmd1, cmd2, trfr_sz);
if (instr && instr->type == NAND_OP_DATA_IN_INSTR) {
/*
* Don't fix endianness on page access for historical reasons.
* See comment in vf610_nfc_rd_from_sram
*/
vf610_nfc_rd_from_sram(instr->ctx.data.buf.in + offset,
nfc->regs + NFC_MAIN_AREA(0) + offset,
trfr_sz, !nfc->data_access);
}
if (force8bit && (chip->options & NAND_BUSWIDTH_16))
vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
return 0;
}
static const struct nand_op_parser vf610_nfc_op_parser = NAND_OP_PARSER(
NAND_OP_PARSER_PATTERN(vf610_nfc_cmd,
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, PAGE_2K + OOB_MAX),
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
NAND_OP_PARSER_PATTERN(vf610_nfc_cmd,
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, PAGE_2K + OOB_MAX)),
);
/*
* This function supports Vybrid only (MPC5125 would have full RB and four CS)
*/
static void vf610_nfc_select_target(struct nand_chip *chip, unsigned int cs)
{
struct vf610_nfc *nfc = chip_to_nfc(chip);
u32 tmp;
/* Vybrid only (MPC5125 would have full RB and four CS) */
if (nfc->variant != NFC_VFC610)
return;
tmp = vf610_nfc_read(nfc, NFC_ROW_ADDR);
tmp &= ~(ROW_ADDR_CHIP_SEL_RB_MASK | ROW_ADDR_CHIP_SEL_MASK);
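/* Route ready/busy from line 0 and enable only the chip select given by 'cs' */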
tmp |= 1 << ROW_ADDR_CHIP_SEL_RB_SHIFT;
tmp |= BIT(cs) << ROW_ADDR_CHIP_SEL_SHIFT;
vf610_nfc_write(nfc, NFC_ROW_ADDR, tmp);
}
static int vf610_nfc_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
if (!check_only)
vf610_nfc_select_target(chip, op->cs);
return nand_op_parser_exec_op(chip, &vf610_nfc_op_parser, op,
check_only);
}
static inline int vf610_nfc_correct_data(struct nand_chip *chip, uint8_t *dat,
uint8_t *oob, int page)
{
struct vf610_nfc *nfc = chip_to_nfc(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
u32 ecc_status_off = NFC_MAIN_AREA(0) + ECC_SRAM_ADDR + ECC_STATUS;
u8 ecc_status;
u8 ecc_count;
int flips_threshold = nfc->chip.ecc.strength / 2;
ecc_status = vf610_nfc_read(nfc, ecc_status_off) & 0xff;
ecc_count = ecc_status & ECC_STATUS_ERR_COUNT;
if (!(ecc_status & ECC_STATUS_MASK))
return ecc_count;
nfc->data_access = true;
nand_read_oob_op(&nfc->chip, page, 0, oob, mtd->oobsize);
nfc->data_access = false;
/*
* On an erased page, the bitflip count (including OOB) should be zero,
* or at least less than half of the ECC strength.
*/
return nand_check_erased_ecc_chunk(dat, nfc->chip.ecc.size, oob,
mtd->oobsize, NULL, 0,
flips_threshold);
}
static void vf610_nfc_fill_row(struct nand_chip *chip, int page, u32 *code,
u32 *row)
{
*row = ROW_ADDR(0, page & 0xff) | ROW_ADDR(1, page >> 8);
*code |= COMMAND_RAR_BYTE1 | COMMAND_RAR_BYTE2;
if (chip->options & NAND_ROW_ADDR_3) {
*row |= ROW_ADDR(2, page >> 16);
*code |= COMMAND_RAR_BYTE3;
}
}
static int vf610_nfc_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct vf610_nfc *nfc = chip_to_nfc(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
int trfr_sz = mtd->writesize + mtd->oobsize;
u32 row = 0, cmd1 = 0, cmd2 = 0, code = 0;
int stat;
vf610_nfc_select_target(chip, chip->cur_cs);
cmd2 |= NAND_CMD_READ0 << CMD_BYTE1_SHIFT;
code |= COMMAND_CMD_BYTE1 | COMMAND_CAR_BYTE1 | COMMAND_CAR_BYTE2;
vf610_nfc_fill_row(chip, page, &code, &row);
cmd1 |= NAND_CMD_READSTART << CMD_BYTE2_SHIFT;
code |= COMMAND_CMD_BYTE2 | COMMAND_RB_HANDSHAKE | COMMAND_READ_DATA;
cmd2 |= code << CMD_CODE_SHIFT;
vf610_nfc_ecc_mode(nfc, nfc->ecc_mode);
vf610_nfc_run(nfc, 0, row, cmd1, cmd2, trfr_sz);
vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
/*
* Don't fix endianness on page access for historical reasons.
* See comment in vf610_nfc_rd_from_sram
*/
vf610_nfc_rd_from_sram(buf, nfc->regs + NFC_MAIN_AREA(0),
mtd->writesize, false);
if (oob_required)
vf610_nfc_rd_from_sram(chip->oob_poi,
nfc->regs + NFC_MAIN_AREA(0) +
mtd->writesize,
mtd->oobsize, false);
stat = vf610_nfc_correct_data(chip, buf, chip->oob_poi, page);
if (stat < 0) {
mtd->ecc_stats.failed++;
return 0;
} else {
mtd->ecc_stats.corrected += stat;
return stat;
}
}
static int vf610_nfc_write_page(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct vf610_nfc *nfc = chip_to_nfc(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
int trfr_sz = mtd->writesize + mtd->oobsize;
u32 row = 0, cmd1 = 0, cmd2 = 0, code = 0;
u8 status;
int ret;
vf610_nfc_select_target(chip, chip->cur_cs);
cmd2 |= NAND_CMD_SEQIN << CMD_BYTE1_SHIFT;
code |= COMMAND_CMD_BYTE1 | COMMAND_CAR_BYTE1 | COMMAND_CAR_BYTE2;
vf610_nfc_fill_row(chip, page, &code, &row);
cmd1 |= NAND_CMD_PAGEPROG << CMD_BYTE2_SHIFT;
code |= COMMAND_CMD_BYTE2 | COMMAND_WRITE_DATA;
/*
* Don't fix endianness on page access for historical reasons.
* See comment in vf610_nfc_wr_to_sram
*/
vf610_nfc_wr_to_sram(nfc->regs + NFC_MAIN_AREA(0), buf,
mtd->writesize, false);
code |= COMMAND_RB_HANDSHAKE;
cmd2 |= code << CMD_CODE_SHIFT;
vf610_nfc_ecc_mode(nfc, nfc->ecc_mode);
vf610_nfc_run(nfc, 0, row, cmd1, cmd2, trfr_sz);
vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
ret = nand_status_op(chip, &status);
if (ret)
return ret;
if (status & NAND_STATUS_FAIL)
return -EIO;
return 0;
}
static int vf610_nfc_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct vf610_nfc *nfc = chip_to_nfc(chip);
int ret;
nfc->data_access = true;
ret = nand_read_page_raw(chip, buf, oob_required, page);
nfc->data_access = false;
return ret;
}
static int vf610_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
struct vf610_nfc *nfc = chip_to_nfc(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
nfc->data_access = true;
ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
if (!ret && oob_required)
ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
false);
nfc->data_access = false;
if (ret)
return ret;
return nand_prog_page_end_op(chip);
}
static int vf610_nfc_read_oob(struct nand_chip *chip, int page)
{
struct vf610_nfc *nfc = chip_to_nfc(chip);
int ret;
nfc->data_access = true;
ret = nand_read_oob_std(chip, page);
nfc->data_access = false;
return ret;
}
static int vf610_nfc_write_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct vf610_nfc *nfc = chip_to_nfc(chip);
int ret;
nfc->data_access = true;
ret = nand_prog_page_begin_op(chip, page, mtd->writesize,
chip->oob_poi, mtd->oobsize);
nfc->data_access = false;
if (ret)
return ret;
return nand_prog_page_end_op(chip);
}
static const struct of_device_id vf610_nfc_dt_ids[] = {
{ .compatible = "fsl,vf610-nfc", .data = (void *)NFC_VFC610 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, vf610_nfc_dt_ids);
static void vf610_nfc_preinit_controller(struct vf610_nfc *nfc)
{
vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_ADDR_AUTO_INCR_BIT);
vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_BUFNO_AUTO_INCR_BIT);
vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_BOOT_MODE_BIT);
vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_DMA_REQ_BIT);
vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_FAST_FLASH_BIT);
vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
/* Disable virtual pages, only one elementary transfer unit */
vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG, CONFIG_PAGE_CNT_MASK,
CONFIG_PAGE_CNT_SHIFT, 1);
}
static void vf610_nfc_init_controller(struct vf610_nfc *nfc)
{
if (nfc->chip.options & NAND_BUSWIDTH_16)
vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
else
vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
if (nfc->chip.ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
/* Set ECC status offset in SRAM */
vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG,
CONFIG_ECC_SRAM_ADDR_MASK,
CONFIG_ECC_SRAM_ADDR_SHIFT,
ECC_SRAM_ADDR >> 3);
/* Enable ECC status in SRAM */
vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_ECC_SRAM_REQ_BIT);
}
}
static int vf610_nfc_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct vf610_nfc *nfc = chip_to_nfc(chip);
vf610_nfc_init_controller(nfc);
/* Bad block options. */
if (chip->bbt_options & NAND_BBT_USE_FLASH)
chip->bbt_options |= NAND_BBT_NO_OOB;
/* Single buffer only, max 256 OOB minus ECC status */
if (mtd->writesize + mtd->oobsize > PAGE_2K + OOB_MAX - 8) {
dev_err(nfc->dev, "Unsupported flash page size\n");
return -ENXIO;
}
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return 0;
if (mtd->writesize != PAGE_2K && mtd->oobsize < 64) {
dev_err(nfc->dev, "Unsupported flash with hwecc\n");
return -ENXIO;
}
if (chip->ecc.size != mtd->writesize) {
dev_err(nfc->dev, "Step size needs to be page size\n");
return -ENXIO;
}
/* Only 64 byte ECC layouts known */
if (mtd->oobsize > 64)
mtd->oobsize = 64;
/* Use default large page ECC layout defined in NAND core */
mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
if (chip->ecc.strength == 32) {
nfc->ecc_mode = ECC_60_BYTE;
chip->ecc.bytes = 60;
} else if (chip->ecc.strength == 24) {
nfc->ecc_mode = ECC_45_BYTE;
chip->ecc.bytes = 45;
} else {
dev_err(nfc->dev, "Unsupported ECC strength\n");
return -ENXIO;
}
chip->ecc.read_page = vf610_nfc_read_page;
chip->ecc.write_page = vf610_nfc_write_page;
chip->ecc.read_page_raw = vf610_nfc_read_page_raw;
chip->ecc.write_page_raw = vf610_nfc_write_page_raw;
chip->ecc.read_oob = vf610_nfc_read_oob;
chip->ecc.write_oob = vf610_nfc_write_oob;
chip->ecc.size = PAGE_2K;
return 0;
}
static const struct nand_controller_ops vf610_nfc_controller_ops = {
.attach_chip = vf610_nfc_attach_chip,
.exec_op = vf610_nfc_exec_op,
};
static int vf610_nfc_probe(struct platform_device *pdev)
{
struct vf610_nfc *nfc;
struct mtd_info *mtd;
struct nand_chip *chip;
struct device_node *child;
const struct of_device_id *of_id;
int err;
int irq;
nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
if (!nfc)
return -ENOMEM;
nfc->dev = &pdev->dev;
chip = &nfc->chip;
mtd = nand_to_mtd(chip);
mtd->owner = THIS_MODULE;
mtd->dev.parent = nfc->dev;
mtd->name = DRV_NAME;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
nfc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nfc->regs))
return PTR_ERR(nfc->regs);
nfc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(nfc->clk)) {
dev_err(nfc->dev, "Unable to get and enable clock!\n");
return PTR_ERR(nfc->clk);
}
of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
if (!of_id)
return -ENODEV;
nfc->variant = (uintptr_t)of_id->data;
for_each_available_child_of_node(nfc->dev->of_node, child) {
if (of_device_is_compatible(child, "fsl,vf610-nfc-nandcs")) {
if (nand_get_flash_node(chip)) {
dev_err(nfc->dev,
"Only one NAND chip supported!\n");
of_node_put(child);
return -EINVAL;
}
nand_set_flash_node(chip, child);
}
}
if (!nand_get_flash_node(chip)) {
dev_err(nfc->dev, "NAND chip sub-node missing!\n");
return -ENODEV;
}
chip->options |= NAND_NO_SUBPAGE_WRITE;
init_completion(&nfc->cmd_done);
err = devm_request_irq(nfc->dev, irq, vf610_nfc_irq, 0, DRV_NAME, nfc);
if (err) {
dev_err(nfc->dev, "Error requesting IRQ!\n");
return err;
}
vf610_nfc_preinit_controller(nfc);
nand_controller_init(&nfc->base);
nfc->base.ops = &vf610_nfc_controller_ops;
chip->controller = &nfc->base;
/* Scan the NAND chip */
err = nand_scan(chip, 1);
if (err)
return err;
platform_set_drvdata(pdev, nfc);
/* Register device in MTD */
err = mtd_device_register(mtd, NULL, 0);
if (err)
goto err_cleanup_nand;
return 0;
err_cleanup_nand:
nand_cleanup(chip);
return err;
}
static void vf610_nfc_remove(struct platform_device *pdev)
{
struct vf610_nfc *nfc = platform_get_drvdata(pdev);
struct nand_chip *chip = &nfc->chip;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
}
#ifdef CONFIG_PM_SLEEP
static int vf610_nfc_suspend(struct device *dev)
{
struct vf610_nfc *nfc = dev_get_drvdata(dev);
clk_disable_unprepare(nfc->clk);
return 0;
}
static int vf610_nfc_resume(struct device *dev)
{
struct vf610_nfc *nfc = dev_get_drvdata(dev);
int err;
err = clk_prepare_enable(nfc->clk);
if (err)
return err;
vf610_nfc_preinit_controller(nfc);
vf610_nfc_init_controller(nfc);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(vf610_nfc_pm_ops, vf610_nfc_suspend, vf610_nfc_resume);
static struct platform_driver vf610_nfc_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = vf610_nfc_dt_ids,
.pm = &vf610_nfc_pm_ops,
},
.probe = vf610_nfc_probe,
.remove_new = vf610_nfc_remove,
};
module_platform_driver(vf610_nfc_driver);
MODULE_AUTHOR("Stefan Agner <[email protected]>");
MODULE_DESCRIPTION("Freescale VF610/MPC5125 NFC MTD NAND driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/nand/raw/vf610_nfc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Free Electrons
* Copyright (C) 2017 NextThing Co
*
* Author: Boris Brezillon <[email protected]>
*/
#include "internals.h"
/* Bit for detecting BENAND */
#define TOSHIBA_NAND_ID4_IS_BENAND BIT(7)
/* Recommended to rewrite for BENAND */
#define TOSHIBA_NAND_STATUS_REWRITE_RECOMMENDED BIT(3)
/* ECC Status Read Command for BENAND */
#define TOSHIBA_NAND_CMD_ECC_STATUS_READ 0x7A
/* ECC Status Mask for BENAND */
#define TOSHIBA_NAND_ECC_STATUS_MASK 0x0F
/* Uncorrectable Error for BENAND */
#define TOSHIBA_NAND_ECC_STATUS_UNCORR 0x0F
/* Max ECC Steps for BENAND */
#define TOSHIBA_NAND_MAX_ECC_STEPS 8
static int toshiba_nand_benand_read_eccstatus_op(struct nand_chip *chip,
u8 *buf)
{
u8 *ecc_status = buf;
if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(nand_get_interface_config(chip));
struct nand_op_instr instrs[] = {
NAND_OP_CMD(TOSHIBA_NAND_CMD_ECC_STATUS_READ,
PSEC_TO_NSEC(sdr->tADL_min)),
NAND_OP_8BIT_DATA_IN(chip->ecc.steps, ecc_status, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
return -ENOTSUPP;
}
static int toshiba_nand_benand_eccstatus(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
unsigned int max_bitflips = 0;
u8 status, ecc_status[TOSHIBA_NAND_MAX_ECC_STEPS];
/* Check Status */
ret = toshiba_nand_benand_read_eccstatus_op(chip, ecc_status);
if (!ret) {
unsigned int i, bitflips = 0;
for (i = 0; i < chip->ecc.steps; i++) {
bitflips = ecc_status[i] & TOSHIBA_NAND_ECC_STATUS_MASK;
if (bitflips == TOSHIBA_NAND_ECC_STATUS_UNCORR) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += bitflips;
max_bitflips = max(max_bitflips, bitflips);
}
}
return max_bitflips;
}
/*
* Fallback to regular status check if
* toshiba_nand_benand_read_eccstatus_op() failed.
*/
ret = nand_status_op(chip, &status);
if (ret)
return ret;
if (status & NAND_STATUS_FAIL) {
/* uncorrected */
mtd->ecc_stats.failed++;
} else if (status & TOSHIBA_NAND_STATUS_REWRITE_RECOMMENDED) {
/* corrected */
max_bitflips = mtd->bitflip_threshold;
mtd->ecc_stats.corrected += max_bitflips;
}
return max_bitflips;
}
static int
toshiba_nand_read_page_benand(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
int ret;
ret = nand_read_page_raw(chip, buf, oob_required, page);
if (ret)
return ret;
return toshiba_nand_benand_eccstatus(chip);
}
static int
toshiba_nand_read_subpage_benand(struct nand_chip *chip, uint32_t data_offs,
uint32_t readlen, uint8_t *bufpoi, int page)
{
int ret;
ret = nand_read_page_op(chip, page, data_offs,
bufpoi + data_offs, readlen);
if (ret)
return ret;
return toshiba_nand_benand_eccstatus(chip);
}
static void toshiba_nand_benand_init(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
/*
* On BENAND, the entire OOB region can be used by the MTD user.
	 * The calculated ECC bytes are stored in a separate, isolated
	 * area that is not accessible to users.
* This is why chip->ecc.bytes = 0.
*/
chip->ecc.bytes = 0;
chip->ecc.size = 512;
chip->ecc.strength = 8;
chip->ecc.read_page = toshiba_nand_read_page_benand;
chip->ecc.read_subpage = toshiba_nand_read_subpage_benand;
chip->ecc.write_page = nand_write_page_raw;
chip->ecc.read_page_raw = nand_read_page_raw_notsupp;
chip->ecc.write_page_raw = nand_write_page_raw_notsupp;
chip->options |= NAND_SUBPAGE_READ;
mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
}
static void toshiba_nand_decode_id(struct nand_chip *chip)
{
struct nand_device *base = &chip->base;
struct nand_ecc_props requirements = {};
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
memorg = nanddev_get_memorg(&chip->base);
nand_decode_ext_id(chip);
/*
	 * Toshiba 24nm raw SLC (i.e., not BENAND) chips have 32B OOB per
* 512B page. For Toshiba SLC, we decode the 5th/6th byte as
* follows:
* - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
* 110b -> 24nm
* - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC
*/
if (chip->id.len >= 6 && nand_is_slc(chip) &&
(chip->id.data[5] & 0x7) == 0x6 /* 24nm */ &&
!(chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND) /* !BENAND */) {
memorg->oobsize = 32 * memorg->pagesize >> 9;
mtd->oobsize = memorg->oobsize;
}
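	/*
	 * As a worked example (for illustration only), a 2048-byte page on
	 * such a part ends up with 32 * 2048 / 512 = 128 bytes of OOB.
	 */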
/*
* Extract ECC requirements from 6th id byte.
	 * For Toshiba SLC, ECC requirements are as follows:
* - 43nm: 1 bit ECC for each 512Byte is required.
* - 32nm: 4 bit ECC for each 512Byte is required.
* - 24nm: 8 bit ECC for each 512Byte is required.
*/
if (chip->id.len >= 6 && nand_is_slc(chip)) {
requirements.step_size = 512;
switch (chip->id.data[5] & 0x7) {
case 0x4:
requirements.strength = 1;
break;
case 0x5:
requirements.strength = 4;
break;
case 0x6:
requirements.strength = 8;
break;
default:
WARN(1, "Could not get ECC info");
requirements.step_size = 0;
break;
}
}
nanddev_set_ecc_requirements(base, &requirements);
}
static int
tc58teg5dclta00_choose_interface_config(struct nand_chip *chip,
struct nand_interface_config *iface)
{
onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 5);
return nand_choose_best_sdr_timings(chip, iface, NULL);
}
static int
tc58nvg0s3e_choose_interface_config(struct nand_chip *chip,
struct nand_interface_config *iface)
{
onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 2);
return nand_choose_best_sdr_timings(chip, iface, NULL);
}
static int
th58nvg2s3hbai4_choose_interface_config(struct nand_chip *chip,
struct nand_interface_config *iface)
{
struct nand_sdr_timings *sdr = &iface->timings.sdr;
/* Start with timings from the closest timing mode, mode 4. */
onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 4);
/* Patch timings that differ from mode 4. */
sdr->tALS_min = 12000;
sdr->tCHZ_max = 20000;
sdr->tCLS_min = 12000;
sdr->tCOH_min = 0;
sdr->tDS_min = 12000;
sdr->tRHOH_min = 25000;
sdr->tRHW_min = 30000;
sdr->tRHZ_max = 60000;
sdr->tWHR_min = 60000;
/* Patch timings not part of onfi timing mode. */
sdr->tPROG_max = 700000000;
sdr->tBERS_max = 5000000000;
return nand_choose_best_sdr_timings(chip, iface, sdr);
}
static int tc58teg5dclta00_init(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
chip->ops.choose_interface_config =
&tc58teg5dclta00_choose_interface_config;
chip->options |= NAND_NEED_SCRAMBLING;
mtd_set_pairing_scheme(mtd, &dist3_pairing_scheme);
return 0;
}
static int tc58nvg0s3e_init(struct nand_chip *chip)
{
chip->ops.choose_interface_config =
&tc58nvg0s3e_choose_interface_config;
return 0;
}
static int th58nvg2s3hbai4_init(struct nand_chip *chip)
{
chip->ops.choose_interface_config =
&th58nvg2s3hbai4_choose_interface_config;
return 0;
}
static int toshiba_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
/* Check that chip is BENAND and ECC mode is on-die */
if (nand_is_slc(chip) &&
chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND)
toshiba_nand_benand_init(chip);
if (!strcmp("TC58TEG5DCLTA00", chip->parameters.model))
tc58teg5dclta00_init(chip);
if (!strncmp("TC58NVG0S3E", chip->parameters.model,
sizeof("TC58NVG0S3E") - 1))
tc58nvg0s3e_init(chip);
if ((!strncmp("TH58NVG2S3HBAI4", chip->parameters.model,
sizeof("TH58NVG2S3HBAI4") - 1)) ||
(!strncmp("TH58NVG3S0HBAI4", chip->parameters.model,
sizeof("TH58NVG3S0HBAI4") - 1)))
th58nvg2s3hbai4_init(chip);
return 0;
}
const struct nand_manufacturer_ops toshiba_nand_manuf_ops = {
.detect = toshiba_nand_decode_id,
.init = toshiba_nand_init,
};
| linux-master | drivers/mtd/nand/raw/nand_toshiba.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 Steven J. Hill ([email protected])
* 2002-2006 Thomas Gleixner ([email protected])
*
* Credits:
* David Woodhouse for adding multichip support
*
* Aleph One Ltd. and Toby Churchill Ltd. for supporting the
* rework for 2K page size chips
*
* This file contains all legacy helpers/code that should be removed
* at some point.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/nmi.h>
#include "internals.h"
/**
* nand_read_byte - [DEFAULT] read one byte from the chip
* @chip: NAND chip object
*
* Default read function for 8bit buswidth
*/
static uint8_t nand_read_byte(struct nand_chip *chip)
{
return readb(chip->legacy.IO_ADDR_R);
}
/**
* nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
* @chip: NAND chip object
*
* Default read function for 16bit buswidth with endianness conversion.
*
*/
static uint8_t nand_read_byte16(struct nand_chip *chip)
{
return (uint8_t) cpu_to_le16(readw(chip->legacy.IO_ADDR_R));
}
/**
* nand_select_chip - [DEFAULT] control CE line
* @chip: NAND chip object
* @chipnr: chipnumber to select, -1 for deselect
*
* Default select function for 1 chip devices.
*/
static void nand_select_chip(struct nand_chip *chip, int chipnr)
{
switch (chipnr) {
case -1:
chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
0 | NAND_CTRL_CHANGE);
break;
case 0:
break;
default:
BUG();
}
}
/**
* nand_write_byte - [DEFAULT] write single byte to chip
* @chip: NAND chip object
* @byte: value to write
*
* Default function to write a byte to I/O[7:0]
*/
static void nand_write_byte(struct nand_chip *chip, uint8_t byte)
{
chip->legacy.write_buf(chip, &byte, 1);
}
/**
* nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
* @chip: NAND chip object
* @byte: value to write
*
* Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
*/
static void nand_write_byte16(struct nand_chip *chip, uint8_t byte)
{
uint16_t word = byte;
/*
* It's not entirely clear what should happen to I/O[15:8] when writing
* a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
*
* When the host supports a 16-bit bus width, only data is
* transferred at the 16-bit width. All address and command line
* transfers shall use only the lower 8-bits of the data bus. During
* command transfers, the host may place any value on the upper
* 8-bits of the data bus. During address transfers, the host shall
* set the upper 8-bits of the data bus to 00h.
*
* One user of the write_byte callback is nand_set_features. The
* four parameters are specified to be written to I/O[7:0], but this is
* neither an address nor a command transfer. Let's assume a 0 on the
* upper I/O lines is OK.
*/
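	/*
	 * For instance, assuming the default nand_write_buf16() helper is
	 * used below, writing the byte 0xA5 drives the 16-bit word 0x00A5
	 * on I/O[15:0], i.e. zeroes on the upper lines as discussed above.
	 */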
chip->legacy.write_buf(chip, (uint8_t *)&word, 2);
}
/**
* nand_write_buf - [DEFAULT] write buffer to chip
* @chip: NAND chip object
* @buf: data buffer
* @len: number of bytes to write
*
* Default write function for 8bit buswidth.
*/
static void nand_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
iowrite8_rep(chip->legacy.IO_ADDR_W, buf, len);
}
/**
* nand_read_buf - [DEFAULT] read chip data into buffer
* @chip: NAND chip object
 * @buf: buffer to store data
* @len: number of bytes to read
*
* Default read function for 8bit buswidth.
*/
static void nand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
ioread8_rep(chip->legacy.IO_ADDR_R, buf, len);
}
/**
* nand_write_buf16 - [DEFAULT] write buffer to chip
* @chip: NAND chip object
* @buf: data buffer
* @len: number of bytes to write
*
* Default write function for 16bit buswidth.
*/
static void nand_write_buf16(struct nand_chip *chip, const uint8_t *buf,
int len)
{
u16 *p = (u16 *) buf;
iowrite16_rep(chip->legacy.IO_ADDR_W, p, len >> 1);
}
/**
* nand_read_buf16 - [DEFAULT] read chip data into buffer
* @chip: NAND chip object
 * @buf: buffer to store data
* @len: number of bytes to read
*
* Default read function for 16bit buswidth.
*/
static void nand_read_buf16(struct nand_chip *chip, uint8_t *buf, int len)
{
u16 *p = (u16 *) buf;
ioread16_rep(chip->legacy.IO_ADDR_R, p, len >> 1);
}
/**
* panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
* @chip: NAND chip object
* @timeo: Timeout
*
* Helper function for nand_wait_ready used when needing to wait in interrupt
* context.
*/
static void panic_nand_wait_ready(struct nand_chip *chip, unsigned long timeo)
{
int i;
/* Wait for the device to get ready */
for (i = 0; i < timeo; i++) {
if (chip->legacy.dev_ready(chip))
break;
touch_softlockup_watchdog();
mdelay(1);
}
}
/**
* nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
* @chip: NAND chip object
*
* Wait for the ready pin after a command, and warn if a timeout occurs.
*/
void nand_wait_ready(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned long timeo = 400;
if (mtd->oops_panic_write)
return panic_nand_wait_ready(chip, timeo);
/* Wait until command is processed or timeout occurs */
timeo = jiffies + msecs_to_jiffies(timeo);
do {
if (chip->legacy.dev_ready(chip))
return;
cond_resched();
} while (time_before(jiffies, timeo));
if (!chip->legacy.dev_ready(chip))
pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
}
EXPORT_SYMBOL_GPL(nand_wait_ready);
/**
* nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
* @chip: NAND chip object
* @timeo: Timeout in ms
*
* Wait for status ready (i.e. command done) or timeout.
*/
static void nand_wait_status_ready(struct nand_chip *chip, unsigned long timeo)
{
int ret;
timeo = jiffies + msecs_to_jiffies(timeo);
do {
u8 status;
ret = nand_read_data_op(chip, &status, sizeof(status), true,
false);
if (ret)
return;
if (status & NAND_STATUS_READY)
break;
touch_softlockup_watchdog();
} while (time_before(jiffies, timeo));
};
/**
* nand_command - [DEFAULT] Send command to NAND device
* @chip: NAND chip object
* @command: the command to be sent
* @column: the column address for this command, -1 if none
* @page_addr: the page address for this command, -1 if none
*
* Send command to NAND device. This function is used for small page devices
* (512 Bytes per page).
*/
static void nand_command(struct nand_chip *chip, unsigned int command,
int column, int page_addr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
/* Write out the command to the device */
if (command == NAND_CMD_SEQIN) {
int readcmd;
if (column >= mtd->writesize) {
/* OOB area */
column -= mtd->writesize;
readcmd = NAND_CMD_READOOB;
} else if (column < 256) {
/* First 256 bytes --> READ0 */
readcmd = NAND_CMD_READ0;
} else {
column -= 256;
readcmd = NAND_CMD_READ1;
}
chip->legacy.cmd_ctrl(chip, readcmd, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
}
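	/*
	 * For example (small-page layout assumed), on a 512-byte page a
	 * SEQIN at column 300 selects READ1 and becomes column 44, while
	 * column 512 selects READOOB and becomes column 0.
	 */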
if (command != NAND_CMD_NONE)
chip->legacy.cmd_ctrl(chip, command, ctrl);
/* Address cycle, when necessary */
ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
/* Serially input address */
if (column != -1) {
/* Adjust columns for 16 bit buswidth */
if (chip->options & NAND_BUSWIDTH_16 &&
!nand_opcode_8bits(command))
column >>= 1;
chip->legacy.cmd_ctrl(chip, column, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
}
if (page_addr != -1) {
chip->legacy.cmd_ctrl(chip, page_addr, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
chip->legacy.cmd_ctrl(chip, page_addr >> 8, ctrl);
if (chip->options & NAND_ROW_ADDR_3)
chip->legacy.cmd_ctrl(chip, page_addr >> 16, ctrl);
}
chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
/*
	 * Program and erase have their own busy handlers; status and
	 * sequential in need no delay.
*/
switch (command) {
case NAND_CMD_NONE:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
case NAND_CMD_SEQIN:
case NAND_CMD_STATUS:
case NAND_CMD_READID:
case NAND_CMD_SET_FEATURES:
return;
case NAND_CMD_RESET:
if (chip->legacy.dev_ready)
break;
udelay(chip->legacy.chip_delay);
chip->legacy.cmd_ctrl(chip, NAND_CMD_STATUS,
NAND_CTRL_CLE | NAND_CTRL_CHANGE);
chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
		/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
nand_wait_status_ready(chip, 250);
return;
/* This applies to read commands */
case NAND_CMD_READ0:
/*
* READ0 is sometimes used to exit GET STATUS mode. When this
* is the case no address cycles are requested, and we can use
* this information to detect that we should not wait for the
* device to be ready.
*/
if (column == -1 && page_addr == -1)
return;
fallthrough;
default:
/*
* If we don't have access to the busy pin, we apply the given
* command delay
*/
if (!chip->legacy.dev_ready) {
udelay(chip->legacy.chip_delay);
return;
}
}
/*
* Apply this short delay always to ensure that we do wait tWB in
* any case on any machine.
*/
ndelay(100);
nand_wait_ready(chip);
}
static void nand_ccs_delay(struct nand_chip *chip)
{
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(nand_get_interface_config(chip));
/*
* The controller already takes care of waiting for tCCS when the RNDIN
* or RNDOUT command is sent, return directly.
*/
if (!(chip->options & NAND_WAIT_TCCS))
return;
/*
* Wait tCCS_min if it is correctly defined, otherwise wait 500ns
* (which should be safe for all NANDs).
*/
if (!IS_ERR(sdr) && nand_controller_can_setup_interface(chip))
ndelay(sdr->tCCS_min / 1000);
else
ndelay(500);
}
/**
* nand_command_lp - [DEFAULT] Send command to NAND large page device
* @chip: NAND chip object
* @command: the command to be sent
* @column: the column address for this command, -1 if none
* @page_addr: the page address for this command, -1 if none
*
* Send command to NAND device. This is the version for the new large page
* devices. We don't have the separate regions as we have in the small page
* devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
*/
static void nand_command_lp(struct nand_chip *chip, unsigned int command,
int column, int page_addr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
/* Emulate NAND_CMD_READOOB */
if (command == NAND_CMD_READOOB) {
column += mtd->writesize;
command = NAND_CMD_READ0;
}
/* Command latch cycle */
if (command != NAND_CMD_NONE)
chip->legacy.cmd_ctrl(chip, command,
NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
if (column != -1 || page_addr != -1) {
int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
/* Serially input address */
if (column != -1) {
/* Adjust columns for 16 bit buswidth */
if (chip->options & NAND_BUSWIDTH_16 &&
!nand_opcode_8bits(command))
column >>= 1;
chip->legacy.cmd_ctrl(chip, column, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
/* Only output a single addr cycle for 8bits opcodes. */
if (!nand_opcode_8bits(command))
chip->legacy.cmd_ctrl(chip, column >> 8, ctrl);
}
if (page_addr != -1) {
chip->legacy.cmd_ctrl(chip, page_addr, ctrl);
chip->legacy.cmd_ctrl(chip, page_addr >> 8,
NAND_NCE | NAND_ALE);
if (chip->options & NAND_ROW_ADDR_3)
chip->legacy.cmd_ctrl(chip, page_addr >> 16,
NAND_NCE | NAND_ALE);
}
}
chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
/*
	 * Program and erase have their own busy handlers; status and
	 * sequential in need no delay.
*/
switch (command) {
case NAND_CMD_NONE:
case NAND_CMD_CACHEDPROG:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
case NAND_CMD_SEQIN:
case NAND_CMD_STATUS:
case NAND_CMD_READID:
case NAND_CMD_SET_FEATURES:
return;
case NAND_CMD_RNDIN:
nand_ccs_delay(chip);
return;
case NAND_CMD_RESET:
if (chip->legacy.dev_ready)
break;
udelay(chip->legacy.chip_delay);
chip->legacy.cmd_ctrl(chip, NAND_CMD_STATUS,
NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
		/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
nand_wait_status_ready(chip, 250);
return;
case NAND_CMD_RNDOUT:
/* No ready / busy check necessary */
chip->legacy.cmd_ctrl(chip, NAND_CMD_RNDOUTSTART,
NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
nand_ccs_delay(chip);
return;
case NAND_CMD_READ0:
/*
* READ0 is sometimes used to exit GET STATUS mode. When this
* is the case no address cycles are requested, and we can use
* this information to detect that READSTART should not be
* issued.
*/
if (column == -1 && page_addr == -1)
return;
chip->legacy.cmd_ctrl(chip, NAND_CMD_READSTART,
NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
fallthrough; /* This applies to read commands */
default:
/*
* If we don't have access to the busy pin, we apply the given
* command delay.
*/
if (!chip->legacy.dev_ready) {
udelay(chip->legacy.chip_delay);
return;
}
}
/*
* Apply this short delay always to ensure that we do wait tWB in
* any case on any machine.
*/
ndelay(100);
nand_wait_ready(chip);
}
/**
* nand_get_set_features_notsupp - set/get features stub returning -ENOTSUPP
* @chip: nand chip info structure
* @addr: feature address.
* @subfeature_param: the subfeature parameters, a four bytes array.
*
* Should be used by NAND controller drivers that do not support the SET/GET
* FEATURES operations.
*/
int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
u8 *subfeature_param)
{
return -ENOTSUPP;
}
EXPORT_SYMBOL(nand_get_set_features_notsupp);
/**
* nand_wait - [DEFAULT] wait until the command is done
* @chip: NAND chip structure
*
* Wait for command done. This applies to erase and program only.
*/
static int nand_wait(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned long timeo = 400;
u8 status;
int ret;
/*
* Apply this short delay always to ensure that we do wait tWB in any
* case on any machine.
*/
ndelay(100);
ret = nand_status_op(chip, NULL);
if (ret)
return ret;
if (mtd->oops_panic_write) {
panic_nand_wait(chip, timeo);
} else {
timeo = jiffies + msecs_to_jiffies(timeo);
do {
if (chip->legacy.dev_ready) {
if (chip->legacy.dev_ready(chip))
break;
} else {
ret = nand_read_data_op(chip, &status,
sizeof(status), true,
false);
if (ret)
return ret;
if (status & NAND_STATUS_READY)
break;
}
cond_resched();
} while (time_before(jiffies, timeo));
}
ret = nand_read_data_op(chip, &status, sizeof(status), true, false);
if (ret)
return ret;
	/* This can happen in case of a timeout or a buggy dev_ready */
WARN_ON(!(status & NAND_STATUS_READY));
return status;
}
void nand_legacy_set_defaults(struct nand_chip *chip)
{
unsigned int busw = chip->options & NAND_BUSWIDTH_16;
if (nand_has_exec_op(chip))
return;
/* check for proper chip_delay setup, set 20us if not */
if (!chip->legacy.chip_delay)
chip->legacy.chip_delay = 20;
	/* check if a user-supplied command function was given */
if (!chip->legacy.cmdfunc)
chip->legacy.cmdfunc = nand_command;
	/* check if a user-supplied wait function was given */
if (chip->legacy.waitfunc == NULL)
chip->legacy.waitfunc = nand_wait;
if (!chip->legacy.select_chip)
chip->legacy.select_chip = nand_select_chip;
/* If called twice, pointers that depend on busw may need to be reset */
if (!chip->legacy.read_byte || chip->legacy.read_byte == nand_read_byte)
chip->legacy.read_byte = busw ? nand_read_byte16 : nand_read_byte;
if (!chip->legacy.write_buf || chip->legacy.write_buf == nand_write_buf)
chip->legacy.write_buf = busw ? nand_write_buf16 : nand_write_buf;
if (!chip->legacy.write_byte || chip->legacy.write_byte == nand_write_byte)
chip->legacy.write_byte = busw ? nand_write_byte16 : nand_write_byte;
if (!chip->legacy.read_buf || chip->legacy.read_buf == nand_read_buf)
chip->legacy.read_buf = busw ? nand_read_buf16 : nand_read_buf;
}
void nand_legacy_adjust_cmdfunc(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
/* Do not replace user supplied command function! */
if (mtd->writesize > 512 && chip->legacy.cmdfunc == nand_command)
chip->legacy.cmdfunc = nand_command_lp;
}
int nand_legacy_check_hooks(struct nand_chip *chip)
{
/*
* ->legacy.cmdfunc() is legacy and will only be used if ->exec_op() is
* not populated.
*/
if (nand_has_exec_op(chip))
return 0;
/*
* Default functions assigned for ->legacy.cmdfunc() and
* ->legacy.select_chip() both expect ->legacy.cmd_ctrl() to be
* populated.
*/
if ((!chip->legacy.cmdfunc || !chip->legacy.select_chip) &&
!chip->legacy.cmd_ctrl) {
pr_err("->legacy.cmd_ctrl() should be provided\n");
return -EINVAL;
}
return 0;
}
| linux-master | drivers/mtd/nand/raw/nand_legacy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NAND Flash Controller Device Driver
* Copyright © 2009-2010, Intel Corporation and its suppliers.
*/
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "denali.h"
#define DENALI_NAND_NAME "denali-nand-pci"
#define INTEL_CE4100 1
#define INTEL_MRST 2
/* List of platforms this NAND controller has been integrated into */
static const struct pci_device_id denali_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
{ PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, denali_pci_ids);
NAND_ECC_CAPS_SINGLE(denali_pci_ecc_caps, denali_calc_ecc_bytes, 512, 8, 15);
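/*
 * The caps above advertise 512-byte ECC steps with a correction strength of
 * either 8 or 15 bits. With denali_calc_ecc_bytes() this corresponds to 14
 * or 26 ECC bytes per step, respectively (illustrative figures derived from
 * the formula, not from a datasheet).
 */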
static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
resource_size_t csr_base, mem_base;
unsigned long csr_len, mem_len;
struct denali_controller *denali;
struct denali_chip *dchip;
int nsels, ret, i;
denali = devm_kzalloc(&dev->dev, sizeof(*denali), GFP_KERNEL);
if (!denali)
return -ENOMEM;
ret = pcim_enable_device(dev);
if (ret) {
dev_err(&dev->dev, "Spectra: pci_enable_device failed.\n");
return ret;
}
if (id->driver_data == INTEL_CE4100) {
mem_base = pci_resource_start(dev, 0);
mem_len = pci_resource_len(dev, 1);
csr_base = pci_resource_start(dev, 1);
csr_len = pci_resource_len(dev, 1);
} else {
csr_base = pci_resource_start(dev, 0);
csr_len = pci_resource_len(dev, 0);
mem_base = pci_resource_start(dev, 1);
mem_len = pci_resource_len(dev, 1);
if (!mem_len) {
mem_base = csr_base + csr_len;
mem_len = csr_len;
}
}
pci_set_master(dev);
denali->dev = &dev->dev;
denali->irq = dev->irq;
denali->ecc_caps = &denali_pci_ecc_caps;
denali->clk_rate = 50000000; /* 50 MHz */
denali->clk_x_rate = 200000000; /* 200 MHz */
ret = pci_request_regions(dev, DENALI_NAND_NAME);
if (ret) {
dev_err(&dev->dev, "Spectra: Unable to request memory regions\n");
return ret;
}
denali->reg = devm_ioremap(denali->dev, csr_base, csr_len);
if (!denali->reg) {
dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
return -ENOMEM;
}
denali->host = devm_ioremap(denali->dev, mem_base, mem_len);
if (!denali->host) {
dev_err(&dev->dev, "Spectra: ioremap failed!");
return -ENOMEM;
}
ret = denali_init(denali);
if (ret)
return ret;
nsels = denali->nbanks;
dchip = devm_kzalloc(denali->dev, struct_size(dchip, sels, nsels),
GFP_KERNEL);
if (!dchip) {
ret = -ENOMEM;
goto out_remove_denali;
}
dchip->chip.base.ecc.user_conf.flags |= NAND_ECC_MAXIMIZE_STRENGTH;
dchip->nsels = nsels;
for (i = 0; i < nsels; i++)
dchip->sels[i].bank = i;
ret = denali_chip_init(denali, dchip);
if (ret)
goto out_remove_denali;
pci_set_drvdata(dev, denali);
return 0;
out_remove_denali:
denali_remove(denali);
return ret;
}
static void denali_pci_remove(struct pci_dev *dev)
{
struct denali_controller *denali = pci_get_drvdata(dev);
denali_remove(denali);
}
static struct pci_driver denali_pci_driver = {
.name = DENALI_NAND_NAME,
.id_table = denali_pci_ids,
.probe = denali_pci_probe,
.remove = denali_pci_remove,
};
module_pci_driver(denali_pci_driver);
MODULE_DESCRIPTION("PCI driver for Denali NAND controller");
MODULE_AUTHOR("Intel Corporation and its suppliers");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mtd/nand/raw/denali_pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NAND Flash Controller Device Driver
* Copyright © 2009-2010, Intel Corporation and its suppliers.
*
* Copyright (c) 2017-2019 Socionext Inc.
* Reworked by Masahiro Yamada <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "denali.h"
#define DENALI_NAND_NAME "denali-nand"
/* for Indexed Addressing */
#define DENALI_INDEXED_CTRL 0x00
#define DENALI_INDEXED_DATA 0x10
#define DENALI_MAP00 (0 << 26) /* direct access to buffer */
#define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
#define DENALI_MAP10 (2 << 26) /* high-level control plane */
#define DENALI_MAP11 (3 << 26) /* direct controller access */
/* MAP11 access cycle type */
#define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */
#define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */
#define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */
#define DENALI_BANK(denali) ((denali)->active_bank << 24)
#define DENALI_INVALID_BANK -1
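/*
 * For example, a command cycle on the currently active bank is issued by
 * writing the opcode to DENALI_MAP11_CMD | DENALI_BANK(denali), as done in
 * denali_exec_instr() below (illustration of the MAP11 encoding above).
 */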
static struct denali_chip *to_denali_chip(struct nand_chip *chip)
{
return container_of(chip, struct denali_chip, chip);
}
static struct denali_controller *to_denali_controller(struct nand_chip *chip)
{
return container_of(chip->controller, struct denali_controller,
controller);
}
/*
* Direct Addressing - the slave address forms the control information (command
* type, bank, block, and page address). The slave data is the actual data to
 * be transferred. This mode requires a 28-bit address region to be allocated.
*/
static u32 denali_direct_read(struct denali_controller *denali, u32 addr)
{
return ioread32(denali->host + addr);
}
static void denali_direct_write(struct denali_controller *denali, u32 addr,
u32 data)
{
iowrite32(data, denali->host + addr);
}
/*
* Indexed Addressing - address translation module intervenes in passing the
* control information. This mode reduces the required address range. The
* control information and transferred data are latched by the registers in
* the translation module.
*/
static u32 denali_indexed_read(struct denali_controller *denali, u32 addr)
{
iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
return ioread32(denali->host + DENALI_INDEXED_DATA);
}
static void denali_indexed_write(struct denali_controller *denali, u32 addr,
u32 data)
{
iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
static void denali_enable_irq(struct denali_controller *denali)
{
int i;
for (i = 0; i < denali->nbanks; i++)
iowrite32(U32_MAX, denali->reg + INTR_EN(i));
iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
}
static void denali_disable_irq(struct denali_controller *denali)
{
int i;
for (i = 0; i < denali->nbanks; i++)
iowrite32(0, denali->reg + INTR_EN(i));
iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
}
static void denali_clear_irq(struct denali_controller *denali,
int bank, u32 irq_status)
{
/* write one to clear bits */
iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}
static void denali_clear_irq_all(struct denali_controller *denali)
{
int i;
for (i = 0; i < denali->nbanks; i++)
denali_clear_irq(denali, i, U32_MAX);
}
static irqreturn_t denali_isr(int irq, void *dev_id)
{
struct denali_controller *denali = dev_id;
irqreturn_t ret = IRQ_NONE;
u32 irq_status;
int i;
spin_lock(&denali->irq_lock);
for (i = 0; i < denali->nbanks; i++) {
irq_status = ioread32(denali->reg + INTR_STATUS(i));
if (irq_status)
ret = IRQ_HANDLED;
denali_clear_irq(denali, i, irq_status);
if (i != denali->active_bank)
continue;
denali->irq_status |= irq_status;
if (denali->irq_status & denali->irq_mask)
complete(&denali->complete);
}
spin_unlock(&denali->irq_lock);
return ret;
}
static void denali_reset_irq(struct denali_controller *denali)
{
unsigned long flags;
spin_lock_irqsave(&denali->irq_lock, flags);
denali->irq_status = 0;
denali->irq_mask = 0;
spin_unlock_irqrestore(&denali->irq_lock, flags);
}
static u32 denali_wait_for_irq(struct denali_controller *denali, u32 irq_mask)
{
unsigned long time_left, flags;
u32 irq_status;
spin_lock_irqsave(&denali->irq_lock, flags);
irq_status = denali->irq_status;
if (irq_mask & irq_status) {
/* return immediately if the IRQ has already happened. */
spin_unlock_irqrestore(&denali->irq_lock, flags);
return irq_status;
}
denali->irq_mask = irq_mask;
reinit_completion(&denali->complete);
spin_unlock_irqrestore(&denali->irq_lock, flags);
time_left = wait_for_completion_timeout(&denali->complete,
msecs_to_jiffies(1000));
if (!time_left) {
dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
irq_mask);
return 0;
}
return denali->irq_status;
}
static void denali_select_target(struct nand_chip *chip, int cs)
{
struct denali_controller *denali = to_denali_controller(chip);
struct denali_chip_sel *sel = &to_denali_chip(chip)->sels[cs];
struct mtd_info *mtd = nand_to_mtd(chip);
denali->active_bank = sel->bank;
iowrite32(1 << (chip->phys_erase_shift - chip->page_shift),
denali->reg + PAGES_PER_BLOCK);
iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
denali->reg + DEVICE_WIDTH);
iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
iowrite32(chip->options & NAND_ROW_ADDR_3 ?
0 : TWO_ROW_ADDR_CYCLES__FLAG,
denali->reg + TWO_ROW_ADDR_CYCLES);
iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
denali->reg + ECC_CORRECTION);
iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
iowrite32(chip->ecc.steps, denali->reg + CFG_NUM_DATA_BLOCKS);
if (chip->options & NAND_KEEP_TIMINGS)
return;
/* update timing registers unless NAND_KEEP_TIMINGS is set */
iowrite32(sel->hwhr2_and_we_2_re, denali->reg + TWHR2_AND_WE_2_RE);
iowrite32(sel->tcwaw_and_addr_2_data,
denali->reg + TCWAW_AND_ADDR_2_DATA);
iowrite32(sel->re_2_we, denali->reg + RE_2_WE);
iowrite32(sel->acc_clks, denali->reg + ACC_CLKS);
iowrite32(sel->rdwr_en_lo_cnt, denali->reg + RDWR_EN_LO_CNT);
iowrite32(sel->rdwr_en_hi_cnt, denali->reg + RDWR_EN_HI_CNT);
iowrite32(sel->cs_setup_cnt, denali->reg + CS_SETUP_CNT);
iowrite32(sel->re_2_re, denali->reg + RE_2_RE);
}
static int denali_change_column(struct nand_chip *chip, unsigned int offset,
void *buf, unsigned int len, bool write)
{
if (write)
return nand_change_write_column_op(chip, offset, buf, len,
false);
else
return nand_change_read_column_op(chip, offset, buf, len,
false);
}
static int denali_payload_xfer(struct nand_chip *chip, void *buf, bool write)
{
struct denali_controller *denali = to_denali_controller(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int writesize = mtd->writesize;
int oob_skip = denali->oob_skip_bytes;
int ret, i, pos, len;
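	/*
	 * Illustrative layout (assumed figures): with 2 KiB pages, 512-byte
	 * ECC steps, 14 ECC bytes per step and 8 skipped BBM bytes, step 3's
	 * payload would span offsets 1578..2089 in the in-flash layout and
	 * thus cross the 2048-byte boundary, so it is transferred in two
	 * parts around the BBM area.
	 */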
for (i = 0; i < ecc->steps; i++) {
pos = i * (ecc->size + ecc->bytes);
len = ecc->size;
if (pos >= writesize) {
pos += oob_skip;
} else if (pos + len > writesize) {
			/* This chunk overlaps the BBM area. It must be split */
ret = denali_change_column(chip, pos, buf,
writesize - pos, write);
if (ret)
return ret;
buf += writesize - pos;
len -= writesize - pos;
pos = writesize + oob_skip;
}
ret = denali_change_column(chip, pos, buf, len, write);
if (ret)
return ret;
buf += len;
}
return 0;
}
static int denali_oob_xfer(struct nand_chip *chip, void *buf, bool write)
{
struct denali_controller *denali = to_denali_controller(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int writesize = mtd->writesize;
int oobsize = mtd->oobsize;
int oob_skip = denali->oob_skip_bytes;
int ret, i, pos, len;
/* BBM at the beginning of the OOB area */
ret = denali_change_column(chip, writesize, buf, oob_skip, write);
if (ret)
return ret;
buf += oob_skip;
for (i = 0; i < ecc->steps; i++) {
pos = ecc->size + i * (ecc->size + ecc->bytes);
if (i == ecc->steps - 1)
/* The last chunk includes OOB free */
len = writesize + oobsize - pos - oob_skip;
else
len = ecc->bytes;
if (pos >= writesize) {
pos += oob_skip;
} else if (pos + len > writesize) {
			/* This chunk overlaps the BBM area. It must be split */
ret = denali_change_column(chip, pos, buf,
writesize - pos, write);
if (ret)
return ret;
buf += writesize - pos;
len -= writesize - pos;
pos = writesize + oob_skip;
}
ret = denali_change_column(chip, pos, buf, len, write);
if (ret)
return ret;
buf += len;
}
return 0;
}
static int denali_read_raw(struct nand_chip *chip, void *buf, void *oob_buf,
int page)
{
int ret;
if (!buf && !oob_buf)
return -EINVAL;
ret = nand_read_page_op(chip, page, 0, NULL, 0);
if (ret)
return ret;
if (buf) {
ret = denali_payload_xfer(chip, buf, false);
if (ret)
return ret;
}
if (oob_buf) {
ret = denali_oob_xfer(chip, oob_buf, false);
if (ret)
return ret;
}
return 0;
}
static int denali_write_raw(struct nand_chip *chip, const void *buf,
const void *oob_buf, int page)
{
int ret;
if (!buf && !oob_buf)
return -EINVAL;
ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
if (ret)
return ret;
if (buf) {
ret = denali_payload_xfer(chip, (void *)buf, true);
if (ret)
return ret;
}
if (oob_buf) {
ret = denali_oob_xfer(chip, (void *)oob_buf, true);
if (ret)
return ret;
}
return nand_prog_page_end_op(chip);
}
static int denali_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
return denali_read_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
page);
}
static int denali_write_page_raw(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
return denali_write_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
page);
}
static int denali_read_oob(struct nand_chip *chip, int page)
{
return denali_read_raw(chip, NULL, chip->oob_poi, page);
}
static int denali_write_oob(struct nand_chip *chip, int page)
{
return denali_write_raw(chip, NULL, chip->oob_poi, page);
}
static int denali_check_erased_page(struct nand_chip *chip, u8 *buf,
unsigned long uncor_ecc_flags,
unsigned int max_bitflips)
{
struct denali_controller *denali = to_denali_controller(chip);
struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
int i, stat;
for (i = 0; i < ecc->steps; i++) {
if (!(uncor_ecc_flags & BIT(i)))
continue;
stat = nand_check_erased_ecc_chunk(buf, ecc->size, ecc_code,
ecc->bytes, NULL, 0,
ecc->strength);
if (stat < 0) {
ecc_stats->failed++;
} else {
ecc_stats->corrected += stat;
max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
buf += ecc->size;
ecc_code += ecc->bytes;
}
return max_bitflips;
}
static int denali_hw_ecc_fixup(struct nand_chip *chip,
unsigned long *uncor_ecc_flags)
{
struct denali_controller *denali = to_denali_controller(chip);
struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
int bank = denali->active_bank;
u32 ecc_cor;
unsigned int max_bitflips;
ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
ecc_cor >>= ECC_COR_INFO__SHIFT(bank);
if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
/*
		 * This flag is set when an uncorrectable error occurs in at
		 * least one ECC sector. We cannot know "how many sectors" or
		 * "which sector(s)", so we need an erased-page check for all
		 * sectors.
*/
*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
return 0;
}
max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);
/*
* The register holds the maximum of per-sector corrected bitflips.
* This is suitable for the return value of the ->read_page() callback.
	 * Unfortunately, we cannot know the total number of corrected bits in
	 * the page. Increase the stats by max_bitflips as a compromise.
*/
ecc_stats->corrected += max_bitflips;
return max_bitflips;
}
static int denali_sw_ecc_fixup(struct nand_chip *chip,
unsigned long *uncor_ecc_flags, u8 *buf)
{
struct denali_controller *denali = to_denali_controller(chip);
struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
unsigned int ecc_size = chip->ecc.size;
unsigned int bitflips = 0;
unsigned int max_bitflips = 0;
u32 err_addr, err_cor_info;
unsigned int err_byte, err_sector, err_device;
u8 err_cor_value;
unsigned int prev_sector = 0;
u32 irq_status;
denali_reset_irq(denali);
do {
err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);
err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
err_cor_info);
err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
err_cor_info);
/* reset the bitflip counter when crossing ECC sector */
if (err_sector != prev_sector)
bitflips = 0;
if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
/*
* Check later if this is a real ECC error, or
* an erased sector.
*/
*uncor_ecc_flags |= BIT(err_sector);
} else if (err_byte < ecc_size) {
/*
			 * If err_byte is larger than ecc_size, the error
			 * happened in the OOB area, so we ignore it; there is
			 * no need to correct it. err_device identifies which
			 * NAND device the erroneous bits belong to when more
			 * than one NAND is connected.
*/
int offset;
unsigned int flips_in_byte;
offset = (err_sector * ecc_size + err_byte) *
denali->devs_per_cs + err_device;
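			/*
			 * For example (illustrative values): with a 512-byte
			 * ECC size, two devices per CS, err_sector = 1,
			 * err_byte = 3 and err_device = 0, the flipped byte
			 * sits at offset (1 * 512 + 3) * 2 + 0 = 1030 in the
			 * interleaved buffer.
			 */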
/* correct the ECC error */
flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
buf[offset] ^= err_cor_value;
ecc_stats->corrected += flips_in_byte;
bitflips += flips_in_byte;
max_bitflips = max(max_bitflips, bitflips);
}
prev_sector = err_sector;
} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));
/*
	 * Once all ECC errors have been handled, the controller will trigger
	 * an ECC_TRANSACTION_DONE interrupt.
*/
irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
return -EIO;
return max_bitflips;
}
static void denali_setup_dma64(struct denali_controller *denali,
dma_addr_t dma_addr, int page, bool write)
{
u32 mode;
const int page_count = 1;
mode = DENALI_MAP10 | DENALI_BANK(denali) | page;
/* DMA is a three step process */
/*
* 1. setup transfer type, interrupt when complete,
* burst len = 64 bytes, the number of pages
*/
denali->host_write(denali, mode,
0x01002000 | (64 << 16) |
(write ? BIT(8) : 0) | page_count);
/* 2. set memory low address */
denali->host_write(denali, mode, lower_32_bits(dma_addr));
/* 3. set memory high address */
denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
static void denali_setup_dma32(struct denali_controller *denali,
dma_addr_t dma_addr, int page, bool write)
{
u32 mode;
const int page_count = 1;
mode = DENALI_MAP10 | DENALI_BANK(denali);
/* DMA is a four step process */
/* 1. setup transfer type and # of pages */
denali->host_write(denali, mode | page,
0x2000 | (write ? BIT(8) : 0) | page_count);
/* 2. set memory high address bits 23:8 */
denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
/* 3. set memory low address bits 23:8 */
denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
/* 4. interrupt when complete, burst len = 64 bytes */
denali->host_write(denali, mode | 0x14000, 0x2400);
}
static int denali_pio_read(struct denali_controller *denali, u32 *buf,
size_t size, int page)
{
u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
u32 irq_status, ecc_err_mask;
int i;
if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
ecc_err_mask = INTR__ECC_UNCOR_ERR;
else
ecc_err_mask = INTR__ECC_ERR;
denali_reset_irq(denali);
for (i = 0; i < size / 4; i++)
buf[i] = denali->host_read(denali, addr);
irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
if (!(irq_status & INTR__PAGE_XFER_INC))
return -EIO;
if (irq_status & INTR__ERASED_PAGE)
memset(buf, 0xff, size);
return irq_status & ecc_err_mask ? -EBADMSG : 0;
}
static int denali_pio_write(struct denali_controller *denali, const u32 *buf,
size_t size, int page)
{
u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
u32 irq_status;
int i;
denali_reset_irq(denali);
for (i = 0; i < size / 4; i++)
denali->host_write(denali, addr, buf[i]);
irq_status = denali_wait_for_irq(denali,
INTR__PROGRAM_COMP |
INTR__PROGRAM_FAIL);
if (!(irq_status & INTR__PROGRAM_COMP))
return -EIO;
return 0;
}
static int denali_pio_xfer(struct denali_controller *denali, void *buf,
size_t size, int page, bool write)
{
if (write)
return denali_pio_write(denali, buf, size, page);
else
return denali_pio_read(denali, buf, size, page);
}
static int denali_dma_xfer(struct denali_controller *denali, void *buf,
size_t size, int page, bool write)
{
dma_addr_t dma_addr;
u32 irq_mask, irq_status, ecc_err_mask;
enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
int ret = 0;
dma_addr = dma_map_single(denali->dev, buf, size, dir);
if (dma_mapping_error(denali->dev, dma_addr)) {
dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
return denali_pio_xfer(denali, buf, size, page, write);
}
if (write) {
/*
* INTR__PROGRAM_COMP is never asserted for the DMA transfer.
* We can use INTR__DMA_CMD_COMP instead. This flag is asserted
* when the page program is completed.
*/
irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
ecc_err_mask = 0;
} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
irq_mask = INTR__DMA_CMD_COMP;
ecc_err_mask = INTR__ECC_UNCOR_ERR;
} else {
irq_mask = INTR__DMA_CMD_COMP;
ecc_err_mask = INTR__ECC_ERR;
}
iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
/*
* The ->setup_dma() hook kicks DMA by using the data/command
* interface, which belongs to a different AXI port from the
* register interface. Read back the register to avoid a race.
*/
ioread32(denali->reg + DMA_ENABLE);
denali_reset_irq(denali);
denali->setup_dma(denali, dma_addr, page, write);
irq_status = denali_wait_for_irq(denali, irq_mask);
if (!(irq_status & INTR__DMA_CMD_COMP))
ret = -EIO;
else if (irq_status & ecc_err_mask)
ret = -EBADMSG;
iowrite32(0, denali->reg + DMA_ENABLE);
dma_unmap_single(denali->dev, dma_addr, size, dir);
if (irq_status & INTR__ERASED_PAGE)
memset(buf, 0xff, size);
return ret;
}
static int denali_page_xfer(struct nand_chip *chip, void *buf, size_t size,
int page, bool write)
{
struct denali_controller *denali = to_denali_controller(chip);
denali_select_target(chip, chip->cur_cs);
if (denali->dma_avail)
return denali_dma_xfer(denali, buf, size, page, write);
else
return denali_pio_xfer(denali, buf, size, page, write);
}
static int denali_read_page(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct denali_controller *denali = to_denali_controller(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned long uncor_ecc_flags = 0;
int stat = 0;
int ret;
ret = denali_page_xfer(chip, buf, mtd->writesize, page, false);
if (ret && ret != -EBADMSG)
return ret;
if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
stat = denali_hw_ecc_fixup(chip, &uncor_ecc_flags);
else if (ret == -EBADMSG)
stat = denali_sw_ecc_fixup(chip, &uncor_ecc_flags, buf);
if (stat < 0)
return stat;
if (uncor_ecc_flags) {
ret = denali_read_oob(chip, page);
if (ret)
return ret;
stat = denali_check_erased_page(chip, buf,
uncor_ecc_flags, stat);
}
return stat;
}
static int denali_write_page(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
return denali_page_xfer(chip, (void *)buf, mtd->writesize, page, true);
}
static int denali_setup_interface(struct nand_chip *chip, int chipnr,
const struct nand_interface_config *conf)
{
static const unsigned int data_setup_on_host = 10000;
struct denali_controller *denali = to_denali_controller(chip);
struct denali_chip_sel *sel;
const struct nand_sdr_timings *timings;
unsigned long t_x, mult_x;
int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
int addr_2_data_mask;
u32 tmp;
timings = nand_get_sdr_timings(conf);
if (IS_ERR(timings))
return PTR_ERR(timings);
/* clk_x period in picoseconds */
t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
if (!t_x)
return -EINVAL;
/*
* The bus interface clock, clk_x, is phase aligned with the core clock.
	 * The clk_x is an integral multiple N of the core clk. The value N is
	 * configured at IP delivery time, and the available values are 4, 5 and 6.
*/
mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
if (mult_x < 4 || mult_x > 6)
return -EINVAL;
if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
sel = &to_denali_chip(chip)->sels[chipnr];
	/* tRHW -> RE_2_WE */
re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);
tmp = ioread32(denali->reg + RE_2_WE);
tmp &= ~RE_2_WE__VALUE;
tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
sel->re_2_we = tmp;
/* tRHZ -> RE_2_RE */
re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);
tmp = ioread32(denali->reg + RE_2_RE);
tmp &= ~RE_2_RE__VALUE;
tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
sel->re_2_re = tmp;
/*
* tCCS, tWHR -> WE_2_RE
*
* With WE_2_RE properly set, the Denali controller automatically takes
* care of the delay; the driver need not set NAND_WAIT_TCCS.
*/
we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
sel->hwhr2_and_we_2_re = tmp;
/* tADL -> ADDR_2_DATA */
/* for older versions, ADDR_2_DATA is only 6 bit wide */
addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
if (denali->revision < 0x0501)
addr_2_data_mask >>= 1;
addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
sel->tcwaw_and_addr_2_data = tmp;
/* tREH, tWH -> RDWR_EN_HI_CNT */
rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
t_x);
rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);
tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
tmp &= ~RDWR_EN_HI_CNT__VALUE;
tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
sel->rdwr_en_hi_cnt = tmp;
/*
* tREA -> ACC_CLKS
* tRP, tWP, tRHOH, tRC, tWC -> RDWR_EN_LO_CNT
*/
/*
* Determine the minimum of acc_clks to meet the setup timing when
* capturing the incoming data.
*
* The delay on the chip side is well-defined as tREA, but we need to
	 * take additional delay into account. This includes a certain degree
	 * of uncertainty, such as signal propagation delays on the PCB and
	 * in the SoC, load capacitance of the I/O pins, etc.
*/
acc_clks = DIV_ROUND_UP(timings->tREA_max + data_setup_on_host, t_x);
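	/*
	 * Illustrative numbers (assumed, not from a datasheet): with clk_x at
	 * 200 MHz (t_x = 5000 ps) and tREA_max = 20000 ps, this gives
	 * acc_clks = DIV_ROUND_UP(20000 + 10000, 5000) = 6 cycles before the
	 * adjustments below.
	 */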
/* Determine the minimum of rdwr_en_lo_cnt from RE#/WE# pulse width */
rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
/* Extend rdwr_en_lo to meet the data hold timing */
rdwr_en_lo = max_t(int, rdwr_en_lo,
acc_clks - timings->tRHOH_min / t_x);
/* Extend rdwr_en_lo to meet the requirement for RE#/WE# cycle time */
rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
t_x);
rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);
/* Center the data latch timing for extra safety */
acc_clks = (acc_clks + rdwr_en_lo +
DIV_ROUND_UP(timings->tRHOH_min, t_x)) / 2;
acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
tmp = ioread32(denali->reg + ACC_CLKS);
tmp &= ~ACC_CLKS__VALUE;
tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
sel->acc_clks = tmp;
tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
tmp &= ~RDWR_EN_LO_CNT__VALUE;
tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
sel->rdwr_en_lo_cnt = tmp;
/* tCS, tCEA -> CS_SETUP_CNT */
cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
(int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
0);
cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);
tmp = ioread32(denali->reg + CS_SETUP_CNT);
tmp &= ~CS_SETUP_CNT__VALUE;
tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
sel->cs_setup_cnt = tmp;
return 0;
}
int denali_calc_ecc_bytes(int step_size, int strength)
{
/* BCH code. Denali requires ecc.bytes to be multiple of 2 */
return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);
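/*
 * Worked example (illustration only): for a 512-byte step and strength 8,
 * fls(512 * 8) = 13, so ecc.bytes = DIV_ROUND_UP(8 * 13, 16) * 2 = 14.
 */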
static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct denali_controller *denali = to_denali_controller(chip);
if (section > 0)
return -ERANGE;
oobregion->offset = denali->oob_skip_bytes;
oobregion->length = chip->ecc.total;
return 0;
}
static int denali_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct denali_controller *denali = to_denali_controller(chip);
if (section > 0)
return -ERANGE;
oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
oobregion->length = mtd->oobsize - oobregion->offset;
return 0;
}
static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
.ecc = denali_ooblayout_ecc,
.free = denali_ooblayout_free,
};
static int denali_multidev_fixup(struct nand_chip *chip)
{
struct denali_controller *denali = to_denali_controller(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
memorg = nanddev_get_memorg(&chip->base);
/*
* Support for multi device:
* When the IP configuration is x16 capable and two x8 chips are
* connected in parallel, DEVICES_CONNECTED should be set to 2.
* In this case, the core framework knows nothing about this fact,
* so we should tell it the _logical_ pagesize and anything necessary.
*/
denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);
/*
* On some SoCs, DEVICES_CONNECTED is not auto-detected.
	 * For those, DEVICES_CONNECTED is left at 0. Set it to 1 in that case.
*/
if (denali->devs_per_cs == 0) {
denali->devs_per_cs = 1;
iowrite32(1, denali->reg + DEVICES_CONNECTED);
}
if (denali->devs_per_cs == 1)
return 0;
if (denali->devs_per_cs != 2) {
dev_err(denali->dev, "unsupported number of devices %d\n",
denali->devs_per_cs);
return -EINVAL;
}
/* 2 chips in parallel */
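	/*
	 * Example (illustrative): two x8 chips with 2 KiB pages and 64-byte
	 * OOB then behave as one logical device with 4 KiB pages and
	 * 128-byte OOB, hence the doubling of all sizes and shifts below.
	 */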
memorg->pagesize <<= 1;
memorg->oobsize <<= 1;
mtd->size <<= 1;
mtd->erasesize <<= 1;
mtd->writesize <<= 1;
mtd->oobsize <<= 1;
chip->page_shift += 1;
chip->phys_erase_shift += 1;
chip->bbt_erase_shift += 1;
chip->chip_shift += 1;
chip->pagemask <<= 1;
chip->ecc.size <<= 1;
chip->ecc.bytes <<= 1;
chip->ecc.strength <<= 1;
denali->oob_skip_bytes <<= 1;
return 0;
}
static int denali_attach_chip(struct nand_chip *chip)
{
struct denali_controller *denali = to_denali_controller(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
mtd->oobsize - denali->oob_skip_bytes);
if (ret) {
dev_err(denali->dev, "Failed to setup ECC settings.\n");
return ret;
}
dev_dbg(denali->dev,
"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
ret = denali_multidev_fixup(chip);
if (ret)
return ret;
return 0;
}
static void denali_exec_in8(struct denali_controller *denali, u32 type,
u8 *buf, unsigned int len)
{
int i;
for (i = 0; i < len; i++)
buf[i] = denali->host_read(denali, type | DENALI_BANK(denali));
}
static void denali_exec_in16(struct denali_controller *denali, u32 type,
u8 *buf, unsigned int len)
{
u32 data;
int i;
for (i = 0; i < len; i += 2) {
data = denali->host_read(denali, type | DENALI_BANK(denali));
/* bit 31:24 and 15:8 are used for DDR */
buf[i] = data;
buf[i + 1] = data >> 16;
}
}
static void denali_exec_in(struct denali_controller *denali, u32 type,
u8 *buf, unsigned int len, bool width16)
{
if (width16)
denali_exec_in16(denali, type, buf, len);
else
denali_exec_in8(denali, type, buf, len);
}
static void denali_exec_out8(struct denali_controller *denali, u32 type,
const u8 *buf, unsigned int len)
{
int i;
for (i = 0; i < len; i++)
denali->host_write(denali, type | DENALI_BANK(denali), buf[i]);
}
static void denali_exec_out16(struct denali_controller *denali, u32 type,
const u8 *buf, unsigned int len)
{
int i;
for (i = 0; i < len; i += 2)
denali->host_write(denali, type | DENALI_BANK(denali),
buf[i + 1] << 16 | buf[i]);
}
static void denali_exec_out(struct denali_controller *denali, u32 type,
const u8 *buf, unsigned int len, bool width16)
{
if (width16)
denali_exec_out16(denali, type, buf, len);
else
denali_exec_out8(denali, type, buf, len);
}
static int denali_exec_waitrdy(struct denali_controller *denali)
{
u32 irq_stat;
/* R/B# pin transitioned from low to high? */
irq_stat = denali_wait_for_irq(denali, INTR__INT_ACT);
/* Just in case nand_operation has multiple NAND_OP_WAITRDY_INSTR. */
denali_reset_irq(denali);
return irq_stat & INTR__INT_ACT ? 0 : -EIO;
}
static int denali_exec_instr(struct nand_chip *chip,
const struct nand_op_instr *instr)
{
struct denali_controller *denali = to_denali_controller(chip);
switch (instr->type) {
case NAND_OP_CMD_INSTR:
denali_exec_out8(denali, DENALI_MAP11_CMD,
&instr->ctx.cmd.opcode, 1);
return 0;
case NAND_OP_ADDR_INSTR:
denali_exec_out8(denali, DENALI_MAP11_ADDR,
instr->ctx.addr.addrs,
instr->ctx.addr.naddrs);
return 0;
case NAND_OP_DATA_IN_INSTR:
denali_exec_in(denali, DENALI_MAP11_DATA,
instr->ctx.data.buf.in,
instr->ctx.data.len,
!instr->ctx.data.force_8bit &&
chip->options & NAND_BUSWIDTH_16);
return 0;
case NAND_OP_DATA_OUT_INSTR:
denali_exec_out(denali, DENALI_MAP11_DATA,
instr->ctx.data.buf.out,
instr->ctx.data.len,
!instr->ctx.data.force_8bit &&
chip->options & NAND_BUSWIDTH_16);
return 0;
case NAND_OP_WAITRDY_INSTR:
return denali_exec_waitrdy(denali);
default:
WARN_ONCE(1, "unsupported NAND instruction type: %d\n",
instr->type);
return -EINVAL;
}
}
static int denali_exec_op(struct nand_chip *chip,
const struct nand_operation *op, bool check_only)
{
int i, ret;
if (check_only)
return 0;
denali_select_target(chip, op->cs);
/*
* Some commands contain NAND_OP_WAITRDY_INSTR.
	 * The IRQ status must be cleared here to catch the R/B# interrupt there.
*/
denali_reset_irq(to_denali_controller(chip));
for (i = 0; i < op->ninstrs; i++) {
ret = denali_exec_instr(chip, &op->instrs[i]);
if (ret)
return ret;
}
return 0;
}
static const struct nand_controller_ops denali_controller_ops = {
.attach_chip = denali_attach_chip,
.exec_op = denali_exec_op,
.setup_interface = denali_setup_interface,
};
int denali_chip_init(struct denali_controller *denali,
struct denali_chip *dchip)
{
struct nand_chip *chip = &dchip->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
struct denali_chip *dchip2;
int i, j, ret;
chip->controller = &denali->controller;
/* sanity checks for bank numbers */
for (i = 0; i < dchip->nsels; i++) {
unsigned int bank = dchip->sels[i].bank;
if (bank >= denali->nbanks) {
dev_err(denali->dev, "unsupported bank %d\n", bank);
return -EINVAL;
}
for (j = 0; j < i; j++) {
if (bank == dchip->sels[j].bank) {
dev_err(denali->dev,
"bank %d is assigned twice in the same chip\n",
bank);
return -EINVAL;
}
}
list_for_each_entry(dchip2, &denali->chips, node) {
for (j = 0; j < dchip2->nsels; j++) {
if (bank == dchip2->sels[j].bank) {
dev_err(denali->dev,
"bank %d is already used\n",
bank);
return -EINVAL;
}
}
}
}
mtd->dev.parent = denali->dev;
/*
	 * Fall back to the default name if DT did not give a "label" property.
	 * Use the "label" property if multiple chips are connected.
*/
if (!mtd->name && list_empty(&denali->chips))
mtd->name = "denali-nand";
if (denali->dma_avail) {
chip->options |= NAND_USES_DMA;
chip->buf_align = 16;
}
/* clk rate info is needed for setup_interface */
if (!denali->clk_rate || !denali->clk_x_rate)
chip->options |= NAND_KEEP_TIMINGS;
chip->bbt_options |= NAND_BBT_USE_FLASH;
chip->bbt_options |= NAND_BBT_NO_OOB;
chip->options |= NAND_NO_SUBPAGE_WRITE;
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
chip->ecc.read_page = denali_read_page;
chip->ecc.write_page = denali_write_page;
chip->ecc.read_page_raw = denali_read_page_raw;
chip->ecc.write_page_raw = denali_write_page_raw;
chip->ecc.read_oob = denali_read_oob;
chip->ecc.write_oob = denali_write_oob;
mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
ret = nand_scan(chip, dchip->nsels);
if (ret)
return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
goto cleanup_nand;
}
list_add_tail(&dchip->node, &denali->chips);
return 0;
cleanup_nand:
nand_cleanup(chip);
return ret;
}
EXPORT_SYMBOL_GPL(denali_chip_init);
int denali_init(struct denali_controller *denali)
{
u32 features = ioread32(denali->reg + FEATURES);
int ret;
nand_controller_init(&denali->controller);
denali->controller.ops = &denali_controller_ops;
init_completion(&denali->complete);
spin_lock_init(&denali->irq_lock);
INIT_LIST_HEAD(&denali->chips);
denali->active_bank = DENALI_INVALID_BANK;
/*
* The REVISION register may not be reliable. Platforms are allowed to
* override it.
*/
if (!denali->revision)
denali->revision = swab16(ioread32(denali->reg + REVISION));
denali->nbanks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
/* the encoding changed from rev 5.0 to 5.1 */
if (denali->revision < 0x0501)
denali->nbanks <<= 1;
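	/*
	 * Illustrative example (added): an N_BANKS field of 2 gives
	 * 1 << 2 = 4 banks on rev >= 5.1, doubled to 8 on older revisions.
	 */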
if (features & FEATURES__DMA)
denali->dma_avail = true;
if (denali->dma_avail) {
int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;
ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
if (ret) {
dev_info(denali->dev,
"Failed to set DMA mask. Disabling DMA.\n");
denali->dma_avail = false;
}
}
if (denali->dma_avail) {
if (denali->caps & DENALI_CAP_DMA_64BIT)
denali->setup_dma = denali_setup_dma64;
else
denali->setup_dma = denali_setup_dma32;
}
if (features & FEATURES__INDEX_ADDR) {
denali->host_read = denali_indexed_read;
denali->host_write = denali_indexed_write;
} else {
denali->host_read = denali_direct_read;
denali->host_write = denali_direct_write;
}
/*
* Set how many bytes should be skipped before writing data in OOB.
	 * If a platform requests a non-zero value, write it to the register.
	 * Otherwise, read the value back, expecting it has already been set up
* by firmware.
*/
if (denali->oob_skip_bytes)
iowrite32(denali->oob_skip_bytes,
denali->reg + SPARE_AREA_SKIP_BYTES);
else
denali->oob_skip_bytes = ioread32(denali->reg +
SPARE_AREA_SKIP_BYTES);
iowrite32(0, denali->reg + TRANSFER_SPARE_REG);
iowrite32(GENMASK(denali->nbanks - 1, 0), denali->reg + RB_PIN_ENABLED);
iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
iowrite32(ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
iowrite32(WRITE_PROTECT__FLAG, denali->reg + WRITE_PROTECT);
denali_clear_irq_all(denali);
ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
IRQF_SHARED, DENALI_NAND_NAME, denali);
if (ret) {
dev_err(denali->dev, "Unable to request IRQ\n");
return ret;
}
denali_enable_irq(denali);
return 0;
}
EXPORT_SYMBOL(denali_init);
void denali_remove(struct denali_controller *denali)
{
struct denali_chip *dchip, *tmp;
struct nand_chip *chip;
int ret;
list_for_each_entry_safe(dchip, tmp, &denali->chips, node) {
chip = &dchip->chip;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
list_del(&dchip->node);
}
denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);
MODULE_DESCRIPTION("Driver core for Denali NAND controller");
MODULE_AUTHOR("Intel Corporation and its suppliers");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mtd/nand/raw/denali.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for NAND MLC Controller in LPC32xx
*
* Author: Roland Stigge <[email protected]>
*
* Copyright © 2011 WORK Microwave GmbH
* Copyright © 2011, 2012 Roland Stigge
*
* NAND Flash Controller Operation:
* - Read: Auto Decode
* - Write: Auto Encode
* - Tested Page Sizes: 2048, 4096
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/mtd/lpc32xx_mlc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#define DRV_NAME "lpc32xx_mlc"
/**********************************************************************
* MLC NAND controller register offsets
**********************************************************************/
#define MLC_BUFF(x) (x + 0x00000)
#define MLC_DATA(x) (x + 0x08000)
#define MLC_CMD(x) (x + 0x10000)
#define MLC_ADDR(x) (x + 0x10004)
#define MLC_ECC_ENC_REG(x) (x + 0x10008)
#define MLC_ECC_DEC_REG(x) (x + 0x1000C)
#define MLC_ECC_AUTO_ENC_REG(x) (x + 0x10010)
#define MLC_ECC_AUTO_DEC_REG(x) (x + 0x10014)
#define MLC_RPR(x) (x + 0x10018)
#define MLC_WPR(x) (x + 0x1001C)
#define MLC_RUBP(x) (x + 0x10020)
#define MLC_ROBP(x) (x + 0x10024)
#define MLC_SW_WP_ADD_LOW(x) (x + 0x10028)
#define MLC_SW_WP_ADD_HIG(x) (x + 0x1002C)
#define MLC_ICR(x) (x + 0x10030)
#define MLC_TIME_REG(x) (x + 0x10034)
#define MLC_IRQ_MR(x) (x + 0x10038)
#define MLC_IRQ_SR(x) (x + 0x1003C)
#define MLC_LOCK_PR(x) (x + 0x10044)
#define MLC_ISR(x) (x + 0x10048)
#define MLC_CEH(x) (x + 0x1004C)
/**********************************************************************
* MLC_CMD bit definitions
**********************************************************************/
#define MLCCMD_RESET 0xFF
/**********************************************************************
* MLC_ICR bit definitions
**********************************************************************/
#define MLCICR_WPROT (1 << 3)
#define MLCICR_LARGEBLOCK (1 << 2)
#define MLCICR_LONGADDR (1 << 1)
#define MLCICR_16BIT (1 << 0) /* unsupported by LPC32x0! */
/**********************************************************************
* MLC_TIME_REG bit definitions
**********************************************************************/
#define MLCTIMEREG_TCEA_DELAY(n) (((n) & 0x03) << 24)
#define MLCTIMEREG_BUSY_DELAY(n) (((n) & 0x1F) << 19)
#define MLCTIMEREG_NAND_TA(n) (((n) & 0x07) << 16)
#define MLCTIMEREG_RD_HIGH(n) (((n) & 0x0F) << 12)
#define MLCTIMEREG_RD_LOW(n) (((n) & 0x0F) << 8)
#define MLCTIMEREG_WR_HIGH(n) (((n) & 0x0F) << 4)
#define MLCTIMEREG_WR_LOW(n) (((n) & 0x0F) << 0)
/**********************************************************************
* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
**********************************************************************/
#define MLCIRQ_NAND_READY (1 << 5)
#define MLCIRQ_CONTROLLER_READY (1 << 4)
#define MLCIRQ_DECODE_FAILURE (1 << 3)
#define MLCIRQ_DECODE_ERROR (1 << 2)
#define MLCIRQ_ECC_READY (1 << 1)
#define MLCIRQ_WRPROT_FAULT (1 << 0)
/**********************************************************************
* MLC_LOCK_PR bit definitions
**********************************************************************/
#define MLCLOCKPR_MAGIC 0xA25E
/**********************************************************************
* MLC_ISR bit definitions
**********************************************************************/
#define MLCISR_DECODER_FAILURE (1 << 6)
#define MLCISR_ERRORS ((1 << 4) | (1 << 5))
#define MLCISR_ERRORS_DETECTED (1 << 3)
#define MLCISR_ECC_READY (1 << 2)
#define MLCISR_CONTROLLER_READY (1 << 1)
#define MLCISR_NAND_READY (1 << 0)
/**********************************************************************
* MLC_CEH bit definitions
**********************************************************************/
#define MLCCEH_NORMAL (1 << 0)
struct lpc32xx_nand_cfg_mlc {
uint32_t tcea_delay;
uint32_t busy_delay;
uint32_t nand_ta;
uint32_t rd_high;
uint32_t rd_low;
uint32_t wr_high;
uint32_t wr_low;
struct mtd_partition *parts;
unsigned num_parts;
};
static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
if (section >= nand_chip->ecc.steps)
return -ERANGE;
oobregion->offset = ((section + 1) * 16) - nand_chip->ecc.bytes;
oobregion->length = nand_chip->ecc.bytes;
return 0;
}
static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
if (section >= nand_chip->ecc.steps)
return -ERANGE;
oobregion->offset = 16 * section;
oobregion->length = 16 - nand_chip->ecc.bytes;
return 0;
}
static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
.ecc = lpc32xx_ooblayout_ecc,
.free = lpc32xx_ooblayout_free,
};
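/*
 * Illustrative layout (added; assumes the 10 ECC bytes per 512-byte subpage
 * configured in lpc32xx_nand_attach_chip): each 16-byte OOB chunk holds
 * 6 free bytes at offsets 0-5 followed by 10 ECC bytes at offsets 6-15.
 */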
static struct nand_bbt_descr lpc32xx_nand_bbt = {
.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
NAND_BBT_WRITE,
.pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
};
static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
NAND_BBT_WRITE,
.pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
};
struct lpc32xx_nand_host {
struct platform_device *pdev;
struct nand_chip nand_chip;
struct lpc32xx_mlc_platform_data *pdata;
struct clk *clk;
struct gpio_desc *wp_gpio;
void __iomem *io_base;
int irq;
struct lpc32xx_nand_cfg_mlc *ncfg;
struct completion comp_nand;
struct completion comp_controller;
uint32_t llptr;
/*
* Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
*/
dma_addr_t oob_buf_phy;
/*
* Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
*/
uint8_t *oob_buf;
/* Physical address of DMA base address */
dma_addr_t io_base_phy;
struct completion comp_dma;
struct dma_chan *dma_chan;
struct dma_slave_config dma_slave_config;
struct scatterlist sgl;
uint8_t *dma_buf;
uint8_t *dummy_buf;
int mlcsubpages; /* number of 512bytes-subpages */
};
/*
* Activate/Deactivate DMA Operation:
*
* Using the PL080 DMA Controller for transferring the 512 byte subpages
* instead of doing readl() / writel() in a loop slows it down significantly.
* Measurements via getnstimeofday() upon 512 byte subpage reads reveal:
*
* - readl() of 128 x 32 bits in a loop: ~20us
* - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
* - DMA read of 512 bytes (32 bit, no bursts): ~100us
*
* This applies to the transfer itself. In the DMA case: only the
* wait_for_completion() (DMA setup _not_ included).
*
* Note that the 512 bytes subpage transfer is done directly from/to a
* FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
* 2048 bytes page) is spent waiting for the NAND IRQ, anyway. (The NAND
* controller transferring data between its internal buffer to/from the NAND
* chip.)
*
* Therefore, using the PL080 DMA is disabled by default, for now.
*
*/
static int use_dma;
static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
uint32_t clkrate, tmp;
/* Reset MLC controller */
writel(MLCCMD_RESET, MLC_CMD(host->io_base));
udelay(1000);
/* Get base clock for MLC block */
clkrate = clk_get_rate(host->clk);
if (clkrate == 0)
clkrate = 104000000;
/* Unlock MLC_ICR
* (among others, will be locked again automatically) */
writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
/* Configure MLC Controller: Large Block, 5 Byte Address */
tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
writel(tmp, MLC_ICR(host->io_base));
/* Unlock MLC_TIME_REG
* (among others, will be locked again automatically) */
writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
/* Compute clock setup values, see LPC and NAND manual */
tmp = 0;
tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
writel(tmp, MLC_TIME_REG(host->io_base));
/* Enable IRQ for CONTROLLER_READY and NAND_READY */
writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
MLC_IRQ_MR(host->io_base));
/* Normal nCE operation: nCE controlled by controller */
writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
}
/*
* Hardware specific access to control lines
*/
static void lpc32xx_nand_cmd_ctrl(struct nand_chip *nand_chip, int cmd,
unsigned int ctrl)
{
struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);
if (cmd != NAND_CMD_NONE) {
if (ctrl & NAND_CLE)
writel(cmd, MLC_CMD(host->io_base));
else
writel(cmd, MLC_ADDR(host->io_base));
}
}
/*
* Read Device Ready (NAND device _and_ controller ready)
*/
static int lpc32xx_nand_device_ready(struct nand_chip *nand_chip)
{
struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);
if ((readb(MLC_ISR(host->io_base)) &
(MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
(MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
return 1;
return 0;
}
static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
{
uint8_t sr;
/* Clear interrupt flag by reading status */
sr = readb(MLC_IRQ_SR(host->io_base));
if (sr & MLCIRQ_NAND_READY)
complete(&host->comp_nand);
if (sr & MLCIRQ_CONTROLLER_READY)
complete(&host->comp_controller);
return IRQ_HANDLED;
}
static int lpc32xx_waitfunc_nand(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
goto exit;
wait_for_completion(&host->comp_nand);
while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
/* Seems to be delayed sometimes by controller */
dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
cpu_relax();
}
exit:
return NAND_STATUS_READY;
}
static int lpc32xx_waitfunc_controller(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
goto exit;
wait_for_completion(&host->comp_controller);
while (!(readb(MLC_ISR(host->io_base)) &
MLCISR_CONTROLLER_READY)) {
dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
cpu_relax();
}
exit:
return NAND_STATUS_READY;
}
static int lpc32xx_waitfunc(struct nand_chip *chip)
{
lpc32xx_waitfunc_nand(chip);
lpc32xx_waitfunc_controller(chip);
return NAND_STATUS_READY;
}
/*
* Enable NAND write protect
*/
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
if (host->wp_gpio)
gpiod_set_value_cansleep(host->wp_gpio, 1);
}
/*
* Disable NAND write protect
*/
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
if (host->wp_gpio)
gpiod_set_value_cansleep(host->wp_gpio, 0);
}
static void lpc32xx_dma_complete_func(void *completion)
{
complete(completion);
}
static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
enum dma_transfer_direction dir)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
struct dma_async_tx_descriptor *desc;
int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
int res;
sg_init_one(&host->sgl, mem, len);
res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
DMA_BIDIRECTIONAL);
if (res != 1) {
dev_err(mtd->dev.parent, "Failed to map sg list\n");
return -ENXIO;
}
desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
flags);
if (!desc) {
dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
goto out1;
}
init_completion(&host->comp_dma);
desc->callback = lpc32xx_dma_complete_func;
desc->callback_param = &host->comp_dma;
dmaengine_submit(desc);
dma_async_issue_pending(host->dma_chan);
wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));
dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
DMA_BIDIRECTIONAL);
return 0;
out1:
dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
DMA_BIDIRECTIONAL);
return -ENXIO;
}
static int lpc32xx_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
int i, j;
uint8_t *oobbuf = chip->oob_poi;
uint32_t mlc_isr;
int res;
uint8_t *dma_buf;
bool dma_mapped;
if ((void *)buf <= high_memory) {
dma_buf = buf;
dma_mapped = true;
} else {
dma_buf = host->dma_buf;
dma_mapped = false;
}
/* Writing Command and Address */
nand_read_page_op(chip, page, 0, NULL, 0);
/* For all sub-pages */
for (i = 0; i < host->mlcsubpages; i++) {
/* Start Auto Decode Command */
writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));
/* Wait for Controller Ready */
lpc32xx_waitfunc_controller(chip);
/* Check ECC Error status */
mlc_isr = readl(MLC_ISR(host->io_base));
if (mlc_isr & MLCISR_DECODER_FAILURE) {
mtd->ecc_stats.failed++;
dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
} else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
}
/* Read 512 + 16 Bytes */
if (use_dma) {
res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
DMA_DEV_TO_MEM);
if (res)
return res;
} else {
for (j = 0; j < (512 >> 2); j++) {
*((uint32_t *)(buf)) =
readl(MLC_BUFF(host->io_base));
buf += 4;
}
}
for (j = 0; j < (16 >> 2); j++) {
*((uint32_t *)(oobbuf)) =
readl(MLC_BUFF(host->io_base));
oobbuf += 4;
}
}
if (use_dma && !dma_mapped)
memcpy(buf, dma_buf, mtd->writesize);
return 0;
}
static int lpc32xx_write_page_lowlevel(struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
const uint8_t *oobbuf = chip->oob_poi;
uint8_t *dma_buf = (uint8_t *)buf;
int res;
int i, j;
if (use_dma && (void *)buf >= high_memory) {
dma_buf = host->dma_buf;
memcpy(dma_buf, buf, mtd->writesize);
}
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
for (i = 0; i < host->mlcsubpages; i++) {
/* Start Encode */
writeb(0x00, MLC_ECC_ENC_REG(host->io_base));
/* Write 512 + 6 Bytes to Buffer */
if (use_dma) {
res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
DMA_MEM_TO_DEV);
if (res)
return res;
} else {
for (j = 0; j < (512 >> 2); j++) {
writel(*((uint32_t *)(buf)),
MLC_BUFF(host->io_base));
buf += 4;
}
}
writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
oobbuf += 4;
writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
oobbuf += 12;
/* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));
/* Wait for Controller Ready */
lpc32xx_waitfunc_controller(chip);
}
return nand_prog_page_end_op(chip);
}
static int lpc32xx_read_oob(struct nand_chip *chip, int page)
{
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
/* Read whole page - necessary with MLC controller! */
lpc32xx_read_page(chip, host->dummy_buf, 1, page);
return 0;
}
static int lpc32xx_write_oob(struct nand_chip *chip, int page)
{
/* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
return 0;
}
/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
static void lpc32xx_ecc_enable(struct nand_chip *chip, int mode)
{
/* Always enabled! */
}
static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
{
struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
dma_cap_mask_t mask;
if (!host->pdata || !host->pdata->dma_filter) {
dev_err(mtd->dev.parent, "no DMA platform data\n");
return -ENOENT;
}
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
"nand-mlc");
if (!host->dma_chan) {
dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
return -EBUSY;
}
/*
* Set direction to a sensible value even if the dmaengine driver
* should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
* driver criticizes it as "alien transfer direction".
*/
host->dma_slave_config.direction = DMA_DEV_TO_MEM;
host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
host->dma_slave_config.src_maxburst = 128;
host->dma_slave_config.dst_maxburst = 128;
/* DMA controller does flow control: */
host->dma_slave_config.device_fc = false;
host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
goto out1;
}
return 0;
out1:
dma_release_channel(host->dma_chan);
return -ENXIO;
}
static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
{
struct lpc32xx_nand_cfg_mlc *ncfg;
struct device_node *np = dev->of_node;
ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
if (!ncfg)
return NULL;
of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);
if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
!ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
!ncfg->wr_low) {
dev_err(dev, "chip parameters not specified correctly\n");
return NULL;
}
return ncfg;
}
static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
struct device *dev = &host->pdev->dev;
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return 0;
host->dma_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
if (!host->dma_buf)
return -ENOMEM;
host->dummy_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
if (!host->dummy_buf)
return -ENOMEM;
chip->ecc.size = 512;
chip->ecc.hwctl = lpc32xx_ecc_enable;
chip->ecc.read_page_raw = lpc32xx_read_page;
chip->ecc.read_page = lpc32xx_read_page;
chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
chip->ecc.write_page = lpc32xx_write_page_lowlevel;
chip->ecc.write_oob = lpc32xx_write_oob;
chip->ecc.read_oob = lpc32xx_read_oob;
chip->ecc.strength = 4;
chip->ecc.bytes = 10;
mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
host->mlcsubpages = mtd->writesize / 512;
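	/* e.g. a 2048-byte page is handled as 4 subpages of 512 bytes each */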
return 0;
}
static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
.attach_chip = lpc32xx_nand_attach_chip,
};
/*
* Probe for NAND controller
*/
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
struct lpc32xx_nand_host *host;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
struct resource *rc;
int res;
/* Allocate memory for the device structure (and zero it) */
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
host->pdev = pdev;
host->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &rc);
if (IS_ERR(host->io_base))
return PTR_ERR(host->io_base);
host->io_base_phy = rc->start;
nand_chip = &host->nand_chip;
mtd = nand_to_mtd(nand_chip);
if (pdev->dev.of_node)
host->ncfg = lpc32xx_parse_dt(&pdev->dev);
if (!host->ncfg) {
dev_err(&pdev->dev,
"Missing or bad NAND config from device tree\n");
return -ENOENT;
}
/* Start with WP disabled, if available */
host->wp_gpio = gpiod_get_optional(&pdev->dev, NULL, GPIOD_OUT_LOW);
res = PTR_ERR_OR_ZERO(host->wp_gpio);
if (res) {
if (res != -EPROBE_DEFER)
dev_err(&pdev->dev, "WP GPIO is not available: %d\n",
res);
return res;
}
gpiod_set_consumer_name(host->wp_gpio, "NAND WP");
host->pdata = dev_get_platdata(&pdev->dev);
/* link the private data structures */
nand_set_controller_data(nand_chip, host);
nand_set_flash_node(nand_chip, pdev->dev.of_node);
mtd->dev.parent = &pdev->dev;
/* Get NAND clock */
host->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "Clock initialization failure\n");
res = -ENOENT;
goto free_gpio;
}
res = clk_prepare_enable(host->clk);
if (res)
goto put_clk;
nand_chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl;
nand_chip->legacy.dev_ready = lpc32xx_nand_device_ready;
nand_chip->legacy.chip_delay = 25; /* us */
nand_chip->legacy.IO_ADDR_R = MLC_DATA(host->io_base);
nand_chip->legacy.IO_ADDR_W = MLC_DATA(host->io_base);
/* Init NAND controller */
lpc32xx_nand_setup(host);
platform_set_drvdata(pdev, host);
/* Initialize function pointers */
nand_chip->legacy.waitfunc = lpc32xx_waitfunc;
nand_chip->options = NAND_NO_SUBPAGE_WRITE;
nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
nand_chip->bbt_td = &lpc32xx_nand_bbt;
nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;
if (use_dma) {
res = lpc32xx_dma_setup(host);
if (res) {
res = -EIO;
goto unprepare_clk;
}
}
/* initially clear interrupt status */
readb(MLC_IRQ_SR(host->io_base));
init_completion(&host->comp_nand);
init_completion(&host->comp_controller);
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
res = -EINVAL;
goto release_dma_chan;
}
if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
res = -ENXIO;
goto release_dma_chan;
}
/*
	 * Scan to detect the device and to determine the NAND device type:
	 * SMALL block or LARGE block.
*/
nand_chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
res = nand_scan(nand_chip, 1);
if (res)
goto free_irq;
mtd->name = DRV_NAME;
res = mtd_device_register(mtd, host->ncfg->parts,
host->ncfg->num_parts);
if (res)
goto cleanup_nand;
return 0;
cleanup_nand:
nand_cleanup(nand_chip);
free_irq:
free_irq(host->irq, host);
release_dma_chan:
if (use_dma)
dma_release_channel(host->dma_chan);
unprepare_clk:
clk_disable_unprepare(host->clk);
put_clk:
clk_put(host->clk);
free_gpio:
lpc32xx_wp_enable(host);
gpiod_put(host->wp_gpio);
return res;
}
/*
* Remove NAND device
*/
static void lpc32xx_nand_remove(struct platform_device *pdev)
{
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
struct nand_chip *chip = &host->nand_chip;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
free_irq(host->irq, host);
if (use_dma)
dma_release_channel(host->dma_chan);
clk_disable_unprepare(host->clk);
clk_put(host->clk);
lpc32xx_wp_enable(host);
gpiod_put(host->wp_gpio);
}
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
int ret;
/* Re-enable NAND clock */
ret = clk_prepare_enable(host->clk);
if (ret)
return ret;
/* Fresh init of NAND controller */
lpc32xx_nand_setup(host);
/* Disable write protect */
lpc32xx_wp_disable(host);
return 0;
}
static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
/* Enable write protect for safety */
lpc32xx_wp_enable(host);
/* Disable clock */
clk_disable_unprepare(host->clk);
return 0;
}
static const struct of_device_id lpc32xx_nand_match[] = {
{ .compatible = "nxp,lpc3220-mlc" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
static struct platform_driver lpc32xx_nand_driver = {
.probe = lpc32xx_nand_probe,
.remove_new = lpc32xx_nand_remove,
.resume = pm_ptr(lpc32xx_nand_resume),
.suspend = pm_ptr(lpc32xx_nand_suspend),
.driver = {
.name = DRV_NAME,
.of_match_table = lpc32xx_nand_match,
},
};
module_platform_driver(lpc32xx_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Roland Stigge <[email protected]>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");
| linux-master | drivers/mtd/nand/raw/lpc32xx_mlc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Error Location Module
*
* Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
*/
#define DRIVER_NAME "omap-elm"
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/elm.h>
#define ELM_SYSCONFIG 0x010
#define ELM_IRQSTATUS 0x018
#define ELM_IRQENABLE 0x01c
#define ELM_LOCATION_CONFIG 0x020
#define ELM_PAGE_CTRL 0x080
#define ELM_SYNDROME_FRAGMENT_0 0x400
#define ELM_SYNDROME_FRAGMENT_1 0x404
#define ELM_SYNDROME_FRAGMENT_2 0x408
#define ELM_SYNDROME_FRAGMENT_3 0x40c
#define ELM_SYNDROME_FRAGMENT_4 0x410
#define ELM_SYNDROME_FRAGMENT_5 0x414
#define ELM_SYNDROME_FRAGMENT_6 0x418
#define ELM_LOCATION_STATUS 0x800
#define ELM_ERROR_LOCATION_0 0x880
/* ELM Interrupt Status Register */
#define INTR_STATUS_PAGE_VALID BIT(8)
/* ELM Interrupt Enable Register */
#define INTR_EN_PAGE_MASK BIT(8)
/* ELM Location Configuration Register */
#define ECC_BCH_LEVEL_MASK 0x3
/* ELM syndrome */
#define ELM_SYNDROME_VALID BIT(16)
/* ELM_LOCATION_STATUS Register */
#define ECC_CORRECTABLE_MASK BIT(8)
#define ECC_NB_ERRORS_MASK 0x1f
/* ELM_ERROR_LOCATION_0-15 Registers */
#define ECC_ERROR_LOCATION_MASK 0x1fff
#define ELM_ECC_SIZE 0x7ff
#define SYNDROME_FRAGMENT_REG_SIZE 0x40
#define ERROR_LOCATION_SIZE 0x100
struct elm_registers {
u32 elm_irqenable;
u32 elm_sysconfig;
u32 elm_location_config;
u32 elm_page_ctrl;
u32 elm_syndrome_fragment_6[ERROR_VECTOR_MAX];
u32 elm_syndrome_fragment_5[ERROR_VECTOR_MAX];
u32 elm_syndrome_fragment_4[ERROR_VECTOR_MAX];
u32 elm_syndrome_fragment_3[ERROR_VECTOR_MAX];
u32 elm_syndrome_fragment_2[ERROR_VECTOR_MAX];
u32 elm_syndrome_fragment_1[ERROR_VECTOR_MAX];
u32 elm_syndrome_fragment_0[ERROR_VECTOR_MAX];
};
struct elm_info {
struct device *dev;
void __iomem *elm_base;
struct completion elm_completion;
struct list_head list;
enum bch_ecc bch_type;
struct elm_registers elm_regs;
int ecc_steps;
int ecc_syndrome_size;
};
static LIST_HEAD(elm_devices);
static void elm_write_reg(struct elm_info *info, int offset, u32 val)
{
writel(val, info->elm_base + offset);
}
static u32 elm_read_reg(struct elm_info *info, int offset)
{
return readl(info->elm_base + offset);
}
/**
* elm_config - Configure ELM module
* @dev: ELM device
* @bch_type: Type of BCH ecc
* @ecc_steps: ECC steps to assign to config
* @ecc_step_size: ECC step size to assign to config
* @ecc_syndrome_size: ECC syndrome size to assign to config
*/
int elm_config(struct device *dev, enum bch_ecc bch_type,
int ecc_steps, int ecc_step_size, int ecc_syndrome_size)
{
u32 reg_val;
struct elm_info *info = dev_get_drvdata(dev);
if (!info) {
dev_err(dev, "Unable to configure elm - device not probed?\n");
return -EPROBE_DEFER;
}
/* ELM cannot detect ECC errors for chunks > 1KB */
if (ecc_step_size > ((ELM_ECC_SIZE + 1) / 2)) {
dev_err(dev, "unsupported config ecc-size=%d\n", ecc_step_size);
return -EINVAL;
}
	/* ELM can process at most 8 error syndrome vectors at a time */
if (ecc_steps > ERROR_VECTOR_MAX && ecc_steps % ERROR_VECTOR_MAX) {
dev_err(dev, "unsupported config ecc-step=%d\n", ecc_steps);
return -EINVAL;
}
reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
info->bch_type = bch_type;
info->ecc_steps = ecc_steps;
info->ecc_syndrome_size = ecc_syndrome_size;
return 0;
}
EXPORT_SYMBOL(elm_config);
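/*
 * Illustrative usage (an assumption about the caller, not taken from this
 * file): a NAND driver would typically configure the ELM with something like
 *
 *	err = elm_config(elm_dev, BCH8_ECC, mtd->writesize / chip->ecc.size,
 *			 chip->ecc.size, chip->ecc.bytes);
 */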
/**
* elm_configure_page_mode - Enable/Disable page mode
* @info: elm info
* @index: index number of syndrome fragment vector
* @enable: enable/disable flag for page mode
*
* Enable page mode for syndrome fragment index
*/
static void elm_configure_page_mode(struct elm_info *info, int index,
bool enable)
{
u32 reg_val;
reg_val = elm_read_reg(info, ELM_PAGE_CTRL);
if (enable)
reg_val |= BIT(index); /* enable page mode */
else
reg_val &= ~BIT(index); /* disable page mode */
elm_write_reg(info, ELM_PAGE_CTRL, reg_val);
}
/**
* elm_load_syndrome - Load ELM syndrome reg
* @info: elm info
* @err_vec: elm error vectors
* @ecc: buffer with calculated ecc
*
* Load syndrome fragment registers with calculated ecc in reverse order.
*/
static void elm_load_syndrome(struct elm_info *info,
struct elm_errorvec *err_vec, u8 *ecc)
{
int i, offset;
u32 val;
for (i = 0; i < info->ecc_steps; i++) {
/* Check error reported */
if (err_vec[i].error_reported) {
elm_configure_page_mode(info, i, true);
offset = ELM_SYNDROME_FRAGMENT_0 +
SYNDROME_FRAGMENT_REG_SIZE * i;
switch (info->bch_type) {
case BCH8_ECC:
/* syndrome fragment 0 = ecc[9-12B] */
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[9]);
elm_write_reg(info, offset, val);
/* syndrome fragment 1 = ecc[5-8B] */
offset += 4;
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[5]);
elm_write_reg(info, offset, val);
/* syndrome fragment 2 = ecc[1-4B] */
offset += 4;
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[1]);
elm_write_reg(info, offset, val);
/* syndrome fragment 3 = ecc[0B] */
offset += 4;
val = ecc[0];
elm_write_reg(info, offset, val);
break;
case BCH4_ECC:
/* syndrome fragment 0 = ecc[20-52b] bits */
val = ((__force u32)cpu_to_be32(*(u32 *)&ecc[3]) >> 4) |
((ecc[2] & 0xf) << 28);
elm_write_reg(info, offset, val);
/* syndrome fragment 1 = ecc[0-20b] bits */
offset += 4;
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 12;
elm_write_reg(info, offset, val);
break;
case BCH16_ECC:
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[22]);
elm_write_reg(info, offset, val);
offset += 4;
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[18]);
elm_write_reg(info, offset, val);
offset += 4;
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[14]);
elm_write_reg(info, offset, val);
offset += 4;
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[10]);
elm_write_reg(info, offset, val);
offset += 4;
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[6]);
elm_write_reg(info, offset, val);
offset += 4;
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[2]);
elm_write_reg(info, offset, val);
offset += 4;
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 16;
elm_write_reg(info, offset, val);
break;
default:
pr_err("invalid config bch_type\n");
}
}
/* Update ecc pointer with ecc byte size */
ecc += info->ecc_syndrome_size;
}
}
/**
* elm_start_processing - start elm syndrome processing
* @info: elm info
* @err_vec: elm error vectors
*
 * Set the syndrome valid bit for the syndrome fragment registers that have
 * been loaded. This enables the ELM module to start processing the
 * syndrome vectors.
*/
static void elm_start_processing(struct elm_info *info,
struct elm_errorvec *err_vec)
{
int i, offset;
u32 reg_val;
/*
	 * Set the syndrome vector valid, so that the ELM module
	 * will process it for each vector whose error is reported
*/
for (i = 0; i < info->ecc_steps; i++) {
if (err_vec[i].error_reported) {
offset = ELM_SYNDROME_FRAGMENT_6 +
SYNDROME_FRAGMENT_REG_SIZE * i;
reg_val = elm_read_reg(info, offset);
reg_val |= ELM_SYNDROME_VALID;
elm_write_reg(info, offset, reg_val);
}
}
}
/**
* elm_error_correction - locate correctable error position
* @info: elm info
* @err_vec: elm error vectors
*
 * On completion of processing by the ELM module, the error location status
 * register is updated with correctable/uncorrectable error information.
 * In case of correctable errors, the number of errors is read from the
 * ELM location status register and the error positions are read from the
 * ELM error location registers.
*/
static void elm_error_correction(struct elm_info *info,
struct elm_errorvec *err_vec)
{
int i, j;
int offset;
u32 reg_val;
for (i = 0; i < info->ecc_steps; i++) {
/* Check error reported */
if (err_vec[i].error_reported) {
offset = ELM_LOCATION_STATUS + ERROR_LOCATION_SIZE * i;
reg_val = elm_read_reg(info, offset);
/* Check correctable error or not */
if (reg_val & ECC_CORRECTABLE_MASK) {
offset = ELM_ERROR_LOCATION_0 +
ERROR_LOCATION_SIZE * i;
/* Read count of correctable errors */
err_vec[i].error_count = reg_val &
ECC_NB_ERRORS_MASK;
/* Update the error locations in error vector */
for (j = 0; j < err_vec[i].error_count; j++) {
reg_val = elm_read_reg(info, offset);
err_vec[i].error_loc[j] = reg_val &
ECC_ERROR_LOCATION_MASK;
/* Update error location register */
offset += 4;
}
} else {
err_vec[i].error_uncorrectable = true;
}
/* Clearing interrupts for processed error vectors */
elm_write_reg(info, ELM_IRQSTATUS, BIT(i));
/* Disable page mode */
elm_configure_page_mode(info, i, false);
}
}
}
/**
* elm_decode_bch_error_page - Locate error position
* @dev: device pointer
* @ecc_calc: calculated ECC bytes from GPMC
* @err_vec: elm error vectors
*
 * Called with one or more vectors for which an error has been reported;
 * such vectors are flagged in err_vec[].error_reported
*/
void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
struct elm_errorvec *err_vec)
{
struct elm_info *info = dev_get_drvdata(dev);
u32 reg_val;
/* Enable page mode interrupt */
reg_val = elm_read_reg(info, ELM_IRQSTATUS);
elm_write_reg(info, ELM_IRQSTATUS, reg_val & INTR_STATUS_PAGE_VALID);
elm_write_reg(info, ELM_IRQENABLE, INTR_EN_PAGE_MASK);
/* Load valid ecc byte to syndrome fragment register */
elm_load_syndrome(info, err_vec, ecc_calc);
/* Enable syndrome processing for which syndrome fragment is updated */
elm_start_processing(info, err_vec);
/* Wait for ELM module to finish locating error correction */
wait_for_completion(&info->elm_completion);
/* Disable page mode interrupt */
reg_val = elm_read_reg(info, ELM_IRQENABLE);
elm_write_reg(info, ELM_IRQENABLE, reg_val & ~INTR_EN_PAGE_MASK);
elm_error_correction(info, err_vec);
}
EXPORT_SYMBOL(elm_decode_bch_error_page);
static irqreturn_t elm_isr(int this_irq, void *dev_id)
{
u32 reg_val;
struct elm_info *info = dev_id;
reg_val = elm_read_reg(info, ELM_IRQSTATUS);
/* All error vectors processed */
if (reg_val & INTR_STATUS_PAGE_VALID) {
elm_write_reg(info, ELM_IRQSTATUS,
reg_val & INTR_STATUS_PAGE_VALID);
complete(&info->elm_completion);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int elm_probe(struct platform_device *pdev)
{
int ret = 0;
struct elm_info *info;
int irq;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = &pdev->dev;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
info->elm_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->elm_base))
return PTR_ERR(info->elm_base);
ret = devm_request_irq(&pdev->dev, irq, elm_isr, 0,
pdev->name, info);
if (ret) {
dev_err(&pdev->dev, "failure requesting %d\n", irq);
return ret;
}
pm_runtime_enable(&pdev->dev);
if (pm_runtime_get_sync(&pdev->dev) < 0) {
ret = -EINVAL;
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
dev_err(&pdev->dev, "can't enable clock\n");
return ret;
}
init_completion(&info->elm_completion);
INIT_LIST_HEAD(&info->list);
list_add(&info->list, &elm_devices);
platform_set_drvdata(pdev, info);
return ret;
}
static void elm_remove(struct platform_device *pdev)
{
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
#ifdef CONFIG_PM_SLEEP
/*
* elm_context_save
 * saves ELM configurations to preserve them across a hardware power-down
*/
static int elm_context_save(struct elm_info *info)
{
struct elm_registers *regs = &info->elm_regs;
enum bch_ecc bch_type = info->bch_type;
u32 offset = 0, i;
regs->elm_irqenable = elm_read_reg(info, ELM_IRQENABLE);
regs->elm_sysconfig = elm_read_reg(info, ELM_SYSCONFIG);
regs->elm_location_config = elm_read_reg(info, ELM_LOCATION_CONFIG);
regs->elm_page_ctrl = elm_read_reg(info, ELM_PAGE_CTRL);
for (i = 0; i < ERROR_VECTOR_MAX; i++) {
offset = i * SYNDROME_FRAGMENT_REG_SIZE;
switch (bch_type) {
case BCH16_ECC:
regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_6 + offset);
regs->elm_syndrome_fragment_5[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_5 + offset);
regs->elm_syndrome_fragment_4[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_4 + offset);
fallthrough;
case BCH8_ECC:
regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_3 + offset);
regs->elm_syndrome_fragment_2[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_2 + offset);
fallthrough;
case BCH4_ECC:
regs->elm_syndrome_fragment_1[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_1 + offset);
regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_0 + offset);
break;
default:
return -EINVAL;
}
		/*
		 * The ELM SYNDROME_VALID bit in SYNDROME_FRAGMENT_6[] needs
		 * to be saved for all BCH schemes.
		 */
regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
ELM_SYNDROME_FRAGMENT_6 + offset);
}
return 0;
}
/*
* elm_context_restore
 * writes the configuration saved during power-down back into the ELM registers
*/
static int elm_context_restore(struct elm_info *info)
{
struct elm_registers *regs = &info->elm_regs;
enum bch_ecc bch_type = info->bch_type;
u32 offset = 0, i;
elm_write_reg(info, ELM_IRQENABLE, regs->elm_irqenable);
elm_write_reg(info, ELM_SYSCONFIG, regs->elm_sysconfig);
elm_write_reg(info, ELM_LOCATION_CONFIG, regs->elm_location_config);
elm_write_reg(info, ELM_PAGE_CTRL, regs->elm_page_ctrl);
for (i = 0; i < ERROR_VECTOR_MAX; i++) {
offset = i * SYNDROME_FRAGMENT_REG_SIZE;
switch (bch_type) {
case BCH16_ECC:
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
regs->elm_syndrome_fragment_6[i]);
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_5 + offset,
regs->elm_syndrome_fragment_5[i]);
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_4 + offset,
regs->elm_syndrome_fragment_4[i]);
fallthrough;
case BCH8_ECC:
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
regs->elm_syndrome_fragment_3[i]);
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_2 + offset,
regs->elm_syndrome_fragment_2[i]);
fallthrough;
case BCH4_ECC:
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_1 + offset,
regs->elm_syndrome_fragment_1[i]);
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
regs->elm_syndrome_fragment_0[i]);
break;
default:
return -EINVAL;
}
/* ELM_SYNDROME_VALID bit to be set in last to trigger FSM */
elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
regs->elm_syndrome_fragment_6[i] &
ELM_SYNDROME_VALID);
}
return 0;
}
static int elm_suspend(struct device *dev)
{
struct elm_info *info = dev_get_drvdata(dev);
elm_context_save(info);
pm_runtime_put_sync(dev);
return 0;
}
static int elm_resume(struct device *dev)
{
struct elm_info *info = dev_get_drvdata(dev);
pm_runtime_get_sync(dev);
elm_context_restore(info);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(elm_pm_ops, elm_suspend, elm_resume);
#ifdef CONFIG_OF
static const struct of_device_id elm_of_match[] = {
{ .compatible = "ti,am3352-elm" },
{ .compatible = "ti,am64-elm" },
{},
};
MODULE_DEVICE_TABLE(of, elm_of_match);
#endif
static struct platform_driver elm_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(elm_of_match),
.pm = &elm_pm_ops,
},
.probe = elm_probe,
.remove_new = elm_remove,
};
module_platform_driver(elm_driver);
MODULE_DESCRIPTION("ELM driver for BCH error correction");
MODULE_AUTHOR("Texas Instruments");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mtd/nand/raw/omap_elm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include "internals.h"
static int
sdtnqgama_choose_interface_config(struct nand_chip *chip,
struct nand_interface_config *iface)
{
onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 0);
return nand_choose_best_sdr_timings(chip, iface, NULL);
}
static int sandisk_nand_init(struct nand_chip *chip)
{
if (!strncmp("SDTNQGAMA", chip->parameters.model,
sizeof("SDTNQGAMA") - 1))
chip->ops.choose_interface_config =
&sdtnqgama_choose_interface_config;
return 0;
}
const struct nand_manufacturer_ops sandisk_nand_manuf_ops = {
.init = sandisk_nand_init,
};
| linux-master | drivers/mtd/nand/raw/nand_sandisk.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Free Electrons
*
* Author: Boris BREZILLON <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/export.h>
#include "internals.h"
#define ONFI_DYN_TIMING_MAX U16_MAX
/*
* For non-ONFI chips we use the highest possible value for tPROG and tBERS.
 * tR and tCCS will take the default values specified in the ONFI specification
* for timing mode 0, respectively 200us and 500ns.
*
* These four values are tweaked to be more accurate in the case of ONFI chips.
*/
static const struct nand_interface_config onfi_sdr_timings[] = {
/* Mode 0 */
{
.type = NAND_SDR_IFACE,
.timings.mode = 0,
.timings.sdr = {
.tCCS_min = 500000,
.tR_max = 200000000,
.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tADL_min = 400000,
.tALH_min = 20000,
.tALS_min = 50000,
.tAR_min = 25000,
.tCEA_max = 100000,
.tCEH_min = 20000,
.tCH_min = 20000,
.tCHZ_max = 100000,
.tCLH_min = 20000,
.tCLR_min = 20000,
.tCLS_min = 50000,
.tCOH_min = 0,
.tCS_min = 70000,
.tDH_min = 20000,
.tDS_min = 40000,
.tFEAT_max = 1000000,
.tIR_min = 10000,
.tITC_max = 1000000,
.tRC_min = 100000,
.tREA_max = 40000,
.tREH_min = 30000,
.tRHOH_min = 0,
.tRHW_min = 200000,
.tRHZ_max = 200000,
.tRLOH_min = 0,
.tRP_min = 50000,
.tRR_min = 40000,
.tRST_max = 250000000000ULL,
.tWB_max = 200000,
.tWC_min = 100000,
.tWH_min = 30000,
.tWHR_min = 120000,
.tWP_min = 50000,
.tWW_min = 100000,
},
},
/* Mode 1 */
{
.type = NAND_SDR_IFACE,
.timings.mode = 1,
.timings.sdr = {
.tCCS_min = 500000,
.tR_max = 200000000,
.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tADL_min = 400000,
.tALH_min = 10000,
.tALS_min = 25000,
.tAR_min = 10000,
.tCEA_max = 45000,
.tCEH_min = 20000,
.tCH_min = 10000,
.tCHZ_max = 50000,
.tCLH_min = 10000,
.tCLR_min = 10000,
.tCLS_min = 25000,
.tCOH_min = 15000,
.tCS_min = 35000,
.tDH_min = 10000,
.tDS_min = 20000,
.tFEAT_max = 1000000,
.tIR_min = 0,
.tITC_max = 1000000,
.tRC_min = 50000,
.tREA_max = 30000,
.tREH_min = 15000,
.tRHOH_min = 15000,
.tRHW_min = 100000,
.tRHZ_max = 100000,
.tRLOH_min = 0,
.tRP_min = 25000,
.tRR_min = 20000,
.tRST_max = 500000000,
.tWB_max = 100000,
.tWC_min = 45000,
.tWH_min = 15000,
.tWHR_min = 80000,
.tWP_min = 25000,
.tWW_min = 100000,
},
},
/* Mode 2 */
{
.type = NAND_SDR_IFACE,
.timings.mode = 2,
.timings.sdr = {
.tCCS_min = 500000,
.tR_max = 200000000,
.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tADL_min = 400000,
.tALH_min = 10000,
.tALS_min = 15000,
.tAR_min = 10000,
.tCEA_max = 30000,
.tCEH_min = 20000,
.tCH_min = 10000,
.tCHZ_max = 50000,
.tCLH_min = 10000,
.tCLR_min = 10000,
.tCLS_min = 15000,
.tCOH_min = 15000,
.tCS_min = 25000,
.tDH_min = 5000,
.tDS_min = 15000,
.tFEAT_max = 1000000,
.tIR_min = 0,
.tITC_max = 1000000,
.tRC_min = 35000,
.tREA_max = 25000,
.tREH_min = 15000,
.tRHOH_min = 15000,
.tRHW_min = 100000,
.tRHZ_max = 100000,
.tRLOH_min = 0,
.tRR_min = 20000,
.tRST_max = 500000000,
.tWB_max = 100000,
.tRP_min = 17000,
.tWC_min = 35000,
.tWH_min = 15000,
.tWHR_min = 80000,
.tWP_min = 17000,
.tWW_min = 100000,
},
},
/* Mode 3 */
{
.type = NAND_SDR_IFACE,
.timings.mode = 3,
.timings.sdr = {
.tCCS_min = 500000,
.tR_max = 200000000,
.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tADL_min = 400000,
.tALH_min = 5000,
.tALS_min = 10000,
.tAR_min = 10000,
.tCEA_max = 25000,
.tCEH_min = 20000,
.tCH_min = 5000,
.tCHZ_max = 50000,
.tCLH_min = 5000,
.tCLR_min = 10000,
.tCLS_min = 10000,
.tCOH_min = 15000,
.tCS_min = 25000,
.tDH_min = 5000,
.tDS_min = 10000,
.tFEAT_max = 1000000,
.tIR_min = 0,
.tITC_max = 1000000,
.tRC_min = 30000,
.tREA_max = 20000,
.tREH_min = 10000,
.tRHOH_min = 15000,
.tRHW_min = 100000,
.tRHZ_max = 100000,
.tRLOH_min = 0,
.tRP_min = 15000,
.tRR_min = 20000,
.tRST_max = 500000000,
.tWB_max = 100000,
.tWC_min = 30000,
.tWH_min = 10000,
.tWHR_min = 80000,
.tWP_min = 15000,
.tWW_min = 100000,
},
},
/* Mode 4 */
{
.type = NAND_SDR_IFACE,
.timings.mode = 4,
.timings.sdr = {
.tCCS_min = 500000,
.tR_max = 200000000,
.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tADL_min = 400000,
.tALH_min = 5000,
.tALS_min = 10000,
.tAR_min = 10000,
.tCEA_max = 25000,
.tCEH_min = 20000,
.tCH_min = 5000,
.tCHZ_max = 30000,
.tCLH_min = 5000,
.tCLR_min = 10000,
.tCLS_min = 10000,
.tCOH_min = 15000,
.tCS_min = 20000,
.tDH_min = 5000,
.tDS_min = 10000,
.tFEAT_max = 1000000,
.tIR_min = 0,
.tITC_max = 1000000,
.tRC_min = 25000,
.tREA_max = 20000,
.tREH_min = 10000,
.tRHOH_min = 15000,
.tRHW_min = 100000,
.tRHZ_max = 100000,
.tRLOH_min = 5000,
.tRP_min = 12000,
.tRR_min = 20000,
.tRST_max = 500000000,
.tWB_max = 100000,
.tWC_min = 25000,
.tWH_min = 10000,
.tWHR_min = 80000,
.tWP_min = 12000,
.tWW_min = 100000,
},
},
/* Mode 5 */
{
.type = NAND_SDR_IFACE,
.timings.mode = 5,
.timings.sdr = {
.tCCS_min = 500000,
.tR_max = 200000000,
.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tADL_min = 400000,
.tALH_min = 5000,
.tALS_min = 10000,
.tAR_min = 10000,
.tCEA_max = 25000,
.tCEH_min = 20000,
.tCH_min = 5000,
.tCHZ_max = 30000,
.tCLH_min = 5000,
.tCLR_min = 10000,
.tCLS_min = 10000,
.tCOH_min = 15000,
.tCS_min = 15000,
.tDH_min = 5000,
.tDS_min = 7000,
.tFEAT_max = 1000000,
.tIR_min = 0,
.tITC_max = 1000000,
.tRC_min = 20000,
.tREA_max = 16000,
.tREH_min = 7000,
.tRHOH_min = 15000,
.tRHW_min = 100000,
.tRHZ_max = 100000,
.tRLOH_min = 5000,
.tRP_min = 10000,
.tRR_min = 20000,
.tRST_max = 500000000,
.tWB_max = 100000,
.tWC_min = 20000,
.tWH_min = 7000,
.tWHR_min = 80000,
.tWP_min = 10000,
.tWW_min = 100000,
},
},
};
static const struct nand_interface_config onfi_nvddr_timings[] = {
/* Mode 0 */
{
.type = NAND_NVDDR_IFACE,
.timings.mode = 0,
.timings.nvddr = {
.tCCS_min = 500000,
.tR_max = 200000000,
.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tAC_min = 3000,
.tAC_max = 25000,
.tADL_min = 400000,
.tCAD_min = 45000,
.tCAH_min = 10000,
.tCALH_min = 10000,
.tCALS_min = 10000,
.tCAS_min = 10000,
.tCEH_min = 20000,
.tCH_min = 10000,
.tCK_min = 50000,
.tCS_min = 35000,
.tDH_min = 5000,
.tDQSCK_min = 3000,
.tDQSCK_max = 25000,
.tDQSD_min = 0,
.tDQSD_max = 18000,
.tDQSHZ_max = 20000,
.tDQSQ_max = 5000,
.tDS_min = 5000,
.tDSC_min = 50000,
.tFEAT_max = 1000000,
.tITC_max = 1000000,
.tQHS_max = 6000,
.tRHW_min = 100000,
.tRR_min = 20000,
.tRST_max = 500000000,
.tWB_max = 100000,
.tWHR_min = 80000,
.tWRCK_min = 20000,
.tWW_min = 100000,
},
},
/* Mode 1 */
{
.type = NAND_NVDDR_IFACE,
.timings.mode = 1,
.timings.nvddr = {
.tCCS_min = 500000,
.tR_max = 200000000,
.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tAC_min = 3000,
.tAC_max = 25000,
.tADL_min = 400000,
.tCAD_min = 45000,
.tCAH_min = 5000,
.tCALH_min = 5000,
.tCALS_min = 5000,
.tCAS_min = 5000,
.tCEH_min = 20000,
.tCH_min = 5000,
.tCK_min = 30000,
.tCS_min = 25000,
.tDH_min = 2500,
.tDQSCK_min = 3000,
.tDQSCK_max = 25000,
.tDQSD_min = 0,
.tDQSD_max = 18000,
.tDQSHZ_max = 20000,
.tDQSQ_max = 2500,
.tDS_min = 3000,
.tDSC_min = 30000,
.tFEAT_max = 1000000,
.tITC_max = 1000000,
.tQHS_max = 3000,
.tRHW_min = 100000,
.tRR_min = 20000,
.tRST_max = 500000000,
.tWB_max = 100000,
.tWHR_min = 80000,
.tWRCK_min = 20000,
.tWW_min = 100000,
},
},
/* Mode 2 */
{
.type = NAND_NVDDR_IFACE,
.timings.mode = 2,
.timings.nvddr = {
.tCCS_min = 500000,
.tR_max = 200000000,
.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tAC_min = 3000,
.tAC_max = 25000,
.tADL_min = 400000,
.tCAD_min = 45000,
.tCAH_min = 4000,
.tCALH_min = 4000,
.tCALS_min = 4000,
.tCAS_min = 4000,
.tCEH_min = 20000,
.tCH_min = 4000,
.tCK_min = 20000,
.tCS_min = 15000,
.tDH_min = 1700,
.tDQSCK_min = 3000,
.tDQSCK_max = 25000,
.tDQSD_min = 0,
.tDQSD_max = 18000,
.tDQSHZ_max = 20000,
.tDQSQ_max = 1700,
.tDS_min = 2000,
.tDSC_min = 20000,
.tFEAT_max = 1000000,
.tITC_max = 1000000,
.tQHS_max = 2000,
.tRHW_min = 100000,
.tRR_min = 20000,
.tRST_max = 500000000,
.tWB_max = 100000,
.tWHR_min = 80000,
.tWRCK_min = 20000,
.tWW_min = 100000,
},
},
/* Mode 3 */
{
.type = NAND_NVDDR_IFACE,
.timings.mode = 3,
.timings.nvddr = {
.tCCS_min = 500000,
.tR_max = 200000000,
.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tAC_min = 3000,
.tAC_max = 25000,
.tADL_min = 400000,
.tCAD_min = 45000,
.tCAH_min = 3000,
.tCALH_min = 3000,
.tCALS_min = 3000,
.tCAS_min = 3000,
.tCEH_min = 20000,
.tCH_min = 3000,
.tCK_min = 15000,
.tCS_min = 15000,
.tDH_min = 1300,
.tDQSCK_min = 3000,
.tDQSCK_max = 25000,
.tDQSD_min = 0,
.tDQSD_max = 18000,
.tDQSHZ_max = 20000,
.tDQSQ_max = 1300,
.tDS_min = 1500,
.tDSC_min = 15000,
.tFEAT_max = 1000000,
.tITC_max = 1000000,
.tQHS_max = 1500,
.tRHW_min = 100000,
.tRR_min = 20000,
.tRST_max = 500000000,
.tWB_max = 100000,
.tWHR_min = 80000,
.tWRCK_min = 20000,
.tWW_min = 100000,
},
},
/* Mode 4 */
{
.type = NAND_NVDDR_IFACE,
.timings.mode = 4,
.timings.nvddr = {
.tCCS_min = 500000,
.tR_max = 200000000,
.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tAC_min = 3000,
.tAC_max = 25000,
.tADL_min = 400000,
.tCAD_min = 45000,
.tCAH_min = 2500,
.tCALH_min = 2500,
.tCALS_min = 2500,
.tCAS_min = 2500,
.tCEH_min = 20000,
.tCH_min = 2500,
.tCK_min = 12000,
.tCS_min = 15000,
.tDH_min = 1100,
.tDQSCK_min = 3000,
.tDQSCK_max = 25000,
.tDQSD_min = 0,
.tDQSD_max = 18000,
.tDQSHZ_max = 20000,
.tDQSQ_max = 1000,
.tDS_min = 1100,
.tDSC_min = 12000,
.tFEAT_max = 1000000,
.tITC_max = 1000000,
.tQHS_max = 1200,
.tRHW_min = 100000,
.tRR_min = 20000,
.tRST_max = 500000000,
.tWB_max = 100000,
.tWHR_min = 80000,
.tWRCK_min = 20000,
.tWW_min = 100000,
},
},
/* Mode 5 */
{
.type = NAND_NVDDR_IFACE,
.timings.mode = 5,
.timings.nvddr = {
.tCCS_min = 500000,
.tR_max = 200000000,
.tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
.tAC_min = 3000,
.tAC_max = 25000,
.tADL_min = 400000,
.tCAD_min = 45000,
.tCAH_min = 2000,
.tCALH_min = 2000,
.tCALS_min = 2000,
.tCAS_min = 2000,
.tCEH_min = 20000,
.tCH_min = 2000,
.tCK_min = 10000,
.tCS_min = 15000,
.tDH_min = 900,
.tDQSCK_min = 3000,
.tDQSCK_max = 25000,
.tDQSD_min = 0,
.tDQSD_max = 18000,
.tDQSHZ_max = 20000,
.tDQSQ_max = 850,
.tDS_min = 900,
.tDSC_min = 10000,
.tFEAT_max = 1000000,
.tITC_max = 1000000,
.tQHS_max = 1000,
.tRHW_min = 100000,
.tRR_min = 20000,
.tRST_max = 500000000,
.tWB_max = 100000,
.tWHR_min = 80000,
.tWRCK_min = 20000,
.tWW_min = 100000,
},
},
};
/* All NAND chips share the same reset data interface: SDR mode 0 */
const struct nand_interface_config *nand_get_reset_interface_config(void)
{
return &onfi_sdr_timings[0];
}
/**
* onfi_find_closest_sdr_mode - Derive the closest ONFI SDR timing mode given a
* set of timings
* @spec_timings: the timings to challenge
*/
unsigned int
onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings)
{
const struct nand_sdr_timings *onfi_timings;
int mode;
for (mode = ARRAY_SIZE(onfi_sdr_timings) - 1; mode > 0; mode--) {
onfi_timings = &onfi_sdr_timings[mode].timings.sdr;
if (spec_timings->tCCS_min <= onfi_timings->tCCS_min &&
spec_timings->tADL_min <= onfi_timings->tADL_min &&
spec_timings->tALH_min <= onfi_timings->tALH_min &&
spec_timings->tALS_min <= onfi_timings->tALS_min &&
spec_timings->tAR_min <= onfi_timings->tAR_min &&
spec_timings->tCEH_min <= onfi_timings->tCEH_min &&
spec_timings->tCH_min <= onfi_timings->tCH_min &&
spec_timings->tCLH_min <= onfi_timings->tCLH_min &&
spec_timings->tCLR_min <= onfi_timings->tCLR_min &&
spec_timings->tCLS_min <= onfi_timings->tCLS_min &&
spec_timings->tCOH_min <= onfi_timings->tCOH_min &&
spec_timings->tCS_min <= onfi_timings->tCS_min &&
spec_timings->tDH_min <= onfi_timings->tDH_min &&
spec_timings->tDS_min <= onfi_timings->tDS_min &&
spec_timings->tIR_min <= onfi_timings->tIR_min &&
spec_timings->tRC_min <= onfi_timings->tRC_min &&
spec_timings->tREH_min <= onfi_timings->tREH_min &&
spec_timings->tRHOH_min <= onfi_timings->tRHOH_min &&
spec_timings->tRHW_min <= onfi_timings->tRHW_min &&
spec_timings->tRLOH_min <= onfi_timings->tRLOH_min &&
spec_timings->tRP_min <= onfi_timings->tRP_min &&
spec_timings->tRR_min <= onfi_timings->tRR_min &&
spec_timings->tWC_min <= onfi_timings->tWC_min &&
spec_timings->tWH_min <= onfi_timings->tWH_min &&
spec_timings->tWHR_min <= onfi_timings->tWHR_min &&
spec_timings->tWP_min <= onfi_timings->tWP_min &&
spec_timings->tWW_min <= onfi_timings->tWW_min)
return mode;
}
return 0;
}
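/*
 * Illustrative sketch, kept under #if 0: a vendor or controller driver that
 * knows its datasheet minima could derive the matching ONFI SDR mode with
 * onfi_find_closest_sdr_mode(). The helper name and the tightened values
 * below are hypothetical; timings are in picoseconds like the tables above.
 */
#if 0
static unsigned int example_pick_sdr_mode(void)
{
struct nand_sdr_timings spec = onfi_sdr_timings[0].timings.sdr;
/* Pretend the datasheet allows faster write/read cycles than mode 0. */
spec.tWC_min = 25000;
spec.tRC_min = 25000;
spec.tCCS_min = 200000;
/* The highest mode whose minima are all met by the spec is returned. */
return onfi_find_closest_sdr_mode(&spec);
}
#endif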
/**
* onfi_find_closest_nvddr_mode - Derive the closest ONFI NVDDR timing mode
* given a set of timings
* @spec_timings: the timings to challenge
*/
unsigned int
onfi_find_closest_nvddr_mode(const struct nand_nvddr_timings *spec_timings)
{
const struct nand_nvddr_timings *onfi_timings;
int mode;
for (mode = ARRAY_SIZE(onfi_nvddr_timings) - 1; mode > 0; mode--) {
onfi_timings = &onfi_nvddr_timings[mode].timings.nvddr;
if (spec_timings->tCCS_min <= onfi_timings->tCCS_min &&
spec_timings->tAC_min <= onfi_timings->tAC_min &&
spec_timings->tADL_min <= onfi_timings->tADL_min &&
spec_timings->tCAD_min <= onfi_timings->tCAD_min &&
spec_timings->tCAH_min <= onfi_timings->tCAH_min &&
spec_timings->tCALH_min <= onfi_timings->tCALH_min &&
spec_timings->tCALS_min <= onfi_timings->tCALS_min &&
spec_timings->tCAS_min <= onfi_timings->tCAS_min &&
spec_timings->tCEH_min <= onfi_timings->tCEH_min &&
spec_timings->tCH_min <= onfi_timings->tCH_min &&
spec_timings->tCK_min <= onfi_timings->tCK_min &&
spec_timings->tCS_min <= onfi_timings->tCS_min &&
spec_timings->tDH_min <= onfi_timings->tDH_min &&
spec_timings->tDQSCK_min <= onfi_timings->tDQSCK_min &&
spec_timings->tDQSD_min <= onfi_timings->tDQSD_min &&
spec_timings->tDS_min <= onfi_timings->tDS_min &&
spec_timings->tDSC_min <= onfi_timings->tDSC_min &&
spec_timings->tRHW_min <= onfi_timings->tRHW_min &&
spec_timings->tRR_min <= onfi_timings->tRR_min &&
spec_timings->tWHR_min <= onfi_timings->tWHR_min &&
spec_timings->tWRCK_min <= onfi_timings->tWRCK_min &&
spec_timings->tWW_min <= onfi_timings->tWW_min)
return mode;
}
return 0;
}
/**
* onfi_fill_sdr_interface_config - Initialize an SDR interface config from a
* given ONFI mode
* @chip: The NAND chip
* @iface: The interface configuration to fill
* @timing_mode: The ONFI timing mode
*/
static void onfi_fill_sdr_interface_config(struct nand_chip *chip,
struct nand_interface_config *iface,
unsigned int timing_mode)
{
struct onfi_params *onfi = chip->parameters.onfi;
if (WARN_ON(timing_mode >= ARRAY_SIZE(onfi_sdr_timings)))
return;
*iface = onfi_sdr_timings[timing_mode];
/*
* Initialize timings that cannot be deduced from timing mode:
* tPROG, tBERS, tR and tCCS.
* This information is part of the ONFI parameter page.
*/
if (onfi) {
struct nand_sdr_timings *timings = &iface->timings.sdr;
/* microseconds -> picoseconds */
timings->tPROG_max = 1000000ULL * onfi->tPROG;
timings->tBERS_max = 1000000ULL * onfi->tBERS;
timings->tR_max = 1000000ULL * onfi->tR;
/* nanoseconds -> picoseconds */
timings->tCCS_min = 1000UL * onfi->tCCS;
}
}
/**
* onfi_fill_nvddr_interface_config - Initialize an NVDDR interface config from a
* given ONFI mode
* @chip: The NAND chip
* @iface: The interface configuration to fill
* @timing_mode: The ONFI timing mode
*/
static void onfi_fill_nvddr_interface_config(struct nand_chip *chip,
struct nand_interface_config *iface,
unsigned int timing_mode)
{
struct onfi_params *onfi = chip->parameters.onfi;
if (WARN_ON(timing_mode >= ARRAY_SIZE(onfi_nvddr_timings)))
return;
*iface = onfi_nvddr_timings[timing_mode];
/*
* Initialize timings that cannot be deduced from timing mode:
* tPROG, tBERS, tR, tCCS and tCAD.
* This information is part of the ONFI parameter page.
*/
if (onfi) {
struct nand_nvddr_timings *timings = &iface->timings.nvddr;
/* microseconds -> picoseconds */
timings->tPROG_max = 1000000ULL * onfi->tPROG;
timings->tBERS_max = 1000000ULL * onfi->tBERS;
timings->tR_max = 1000000ULL * onfi->tR;
/* nanoseconds -> picoseconds */
timings->tCCS_min = 1000UL * onfi->tCCS;
if (onfi->fast_tCAD)
timings->tCAD_min = 25000;
}
}
/**
* onfi_fill_interface_config - Initialize an interface config from a given
* ONFI mode
* @chip: The NAND chip
* @iface: The interface configuration to fill
* @type: The interface type
* @timing_mode: The ONFI timing mode
*/
void onfi_fill_interface_config(struct nand_chip *chip,
struct nand_interface_config *iface,
enum nand_interface_type type,
unsigned int timing_mode)
{
if (type == NAND_SDR_IFACE)
return onfi_fill_sdr_interface_config(chip, iface, timing_mode);
else
return onfi_fill_nvddr_interface_config(chip, iface, timing_mode);
}
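/*
 * Illustrative sketch, kept under #if 0: how a caller might build an
 * interface config for ONFI SDR mode 3 and inspect one of the resulting
 * timings. The helper name is hypothetical; "chip" is assumed to be an
 * already-detected NAND chip.
 */
#if 0
static void example_fill_iface(struct nand_chip *chip)
{
struct nand_interface_config iface;
onfi_fill_interface_config(chip, &iface, NAND_SDR_IFACE, 3);
pr_debug("SDR mode 3 tWC_min: %u ps\n", iface.timings.sdr.tWC_min);
}
#endif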
| linux-master | drivers/mtd/nand/raw/nand_timings.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 Steven J. Hill ([email protected])
* 2002-2006 Thomas Gleixner ([email protected])
*
* Credits:
* David Woodhouse for adding multichip support
*
* Aleph One Ltd. and Toby Churchill Ltd. for supporting the
* rework for 2K page size chips
*
* This file contains all JEDEC related helpers.
*/
#include <linux/slab.h>
#include "internals.h"
#define JEDEC_PARAM_PAGES 3
/*
* Check if the NAND chip is JEDEC compliant: returns 1 if it is, 0 if not, or a
* negative error code on failure.
*/
int nand_jedec_detect(struct nand_chip *chip)
{
struct nand_device *base = &chip->base;
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
struct nand_jedec_params *p;
struct jedec_ecc_info *ecc;
bool use_datain = false;
int jedec_version = 0;
char id[5];
int i, val, ret;
u16 crc;
memorg = nanddev_get_memorg(&chip->base);
/* Try JEDEC for unknown chip or LP */
ret = nand_readid_op(chip, 0x40, id, sizeof(id));
if (ret || strncmp(id, "JEDEC", sizeof(id)))
return 0;
/* JEDEC chip: allocate a buffer to hold its parameter page */
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
if (!nand_has_exec_op(chip) || chip->controller->supported_op.data_only_read)
use_datain = true;
for (i = 0; i < JEDEC_PARAM_PAGES; i++) {
if (!i)
ret = nand_read_param_page_op(chip, 0x40, p,
sizeof(*p));
else if (use_datain)
ret = nand_read_data_op(chip, p, sizeof(*p), true,
false);
else
ret = nand_change_read_column_op(chip, sizeof(*p) * i,
p, sizeof(*p), true);
if (ret) {
ret = 0;
goto free_jedec_param_page;
}
crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 510);
if (crc == le16_to_cpu(p->crc))
break;
}
if (i == JEDEC_PARAM_PAGES) {
pr_err("Could not find valid JEDEC parameter page; aborting\n");
goto free_jedec_param_page;
}
/* Check version */
val = le16_to_cpu(p->revision);
if (val & (1 << 2))
jedec_version = 10;
else if (val & (1 << 1))
jedec_version = 1; /* vendor specific version */
if (!jedec_version) {
pr_info("unsupported JEDEC version: %d\n", val);
goto free_jedec_param_page;
}
sanitize_string(p->manufacturer, sizeof(p->manufacturer));
sanitize_string(p->model, sizeof(p->model));
chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
if (!chip->parameters.model) {
ret = -ENOMEM;
goto free_jedec_param_page;
}
memorg->pagesize = le32_to_cpu(p->byte_per_page);
mtd->writesize = memorg->pagesize;
/* Please refer to the comment for nand_flash_detect_onfi(). */
memorg->pages_per_eraseblock =
1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
mtd->erasesize = memorg->pages_per_eraseblock * memorg->pagesize;
memorg->oobsize = le16_to_cpu(p->spare_bytes_per_page);
mtd->oobsize = memorg->oobsize;
memorg->luns_per_target = p->lun_count;
memorg->planes_per_lun = 1 << p->multi_plane_addr;
/* Please refer to the comment for nand_flash_detect_onfi(). */
memorg->eraseblocks_per_lun =
1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
memorg->bits_per_cell = p->bits_per_cell;
if (le16_to_cpu(p->features) & JEDEC_FEATURE_16_BIT_BUS)
chip->options |= NAND_BUSWIDTH_16;
/* ECC info */
ecc = &p->ecc_info[0];
if (ecc->codeword_size >= 9) {
struct nand_ecc_props requirements = {
.strength = ecc->ecc_bits,
.step_size = 1 << ecc->codeword_size,
};
nanddev_set_ecc_requirements(base, &requirements);
} else {
pr_warn("Invalid codeword size\n");
}
ret = 1;
free_jedec_param_page:
kfree(p);
return ret;
}
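/*
 * Worked example (illustrative): a JEDEC parameter page reporting
 * ecc_bits = 8 and codeword_size = 9 translates into ECC requirements of
 * strength 8 over 1 << 9 = 512-byte steps, which is what
 * nanddev_set_ecc_requirements() records above.
 */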
| linux-master | drivers/mtd/nand/raw/nand_jedec.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale Integrated Flash Controller NAND driver
*
* Copyright 2011-2012 Freescale Semiconductor, Inc
*
* Author: Dipen Dudhat <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/fsl_ifc.h>
#include <linux/iopoll.h>
#define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */
#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait for IFC NAND Machine */
struct fsl_ifc_ctrl;
/* mtd information per set */
struct fsl_ifc_mtd {
struct nand_chip chip;
struct fsl_ifc_ctrl *ctrl;
struct device *dev;
int bank; /* Chip select bank number */
unsigned int bufnum_mask; /* bufnum = page & bufnum_mask */
u8 __iomem *vbase; /* Chip select base virtual address */
};
/* overview of the fsl ifc controller */
struct fsl_ifc_nand_ctrl {
struct nand_controller controller;
struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT];
void __iomem *addr; /* Address of assigned IFC buffer */
unsigned int page; /* Last page written to / read from */
unsigned int read_bytes;/* Number of bytes read during command */
unsigned int column; /* Saved column from SEQIN */
unsigned int index; /* Pointer to next byte to 'read' */
unsigned int oob; /* Non zero if operating on OOB data */
unsigned int eccread; /* Non zero for a full-page ECC read */
unsigned int counter; /* counter for the initializations */
unsigned int max_bitflips; /* Saved during READ0 cmd */
};
static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
/*
* Generic flash bbt descriptors
*/
static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
static struct nand_bbt_descr bbt_main_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
NAND_BBT_2BIT | NAND_BBT_VERSION,
.offs = 2, /* 0 on 8-bit small page */
.len = 4,
.veroffs = 6,
.maxblocks = 4,
.pattern = bbt_pattern,
};
static struct nand_bbt_descr bbt_mirror_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
NAND_BBT_2BIT | NAND_BBT_VERSION,
.offs = 2, /* 0 on 8-bit small page */
.len = 4,
.veroffs = 6,
.maxblocks = 4,
.pattern = mirror_pattern,
};
static int fsl_ifc_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (section)
return -ERANGE;
oobregion->offset = 8;
oobregion->length = chip->ecc.total;
return 0;
}
static int fsl_ifc_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (section > 1)
return -ERANGE;
if (mtd->writesize == 512 &&
!(chip->options & NAND_BUSWIDTH_16)) {
if (!section) {
oobregion->offset = 0;
oobregion->length = 5;
} else {
oobregion->offset = 6;
oobregion->length = 2;
}
return 0;
}
if (!section) {
oobregion->offset = 2;
oobregion->length = 6;
} else {
oobregion->offset = chip->ecc.total + 8;
oobregion->length = mtd->oobsize - oobregion->offset;
}
return 0;
}
static const struct mtd_ooblayout_ops fsl_ifc_ooblayout_ops = {
.ecc = fsl_ifc_ooblayout_ecc,
.free = fsl_ifc_ooblayout_free,
};
/*
* Set up the IFC hardware block and page address fields, and the ifc nand
* structure addr field to point to the correct IFC buffer in memory
*/
static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
int buf_num;
ifc_nand_ctrl->page = page_addr;
/* Program ROW0/COL0 */
ifc_out32(page_addr, &ifc->ifc_nand.row0);
ifc_out32((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
buf_num = page_addr & priv->bufnum_mask;
ifc_nand_ctrl->addr = priv->vbase + buf_num * (mtd->writesize * 2);
ifc_nand_ctrl->index = column;
/* for OOB data point to the second half of the buffer */
if (oob)
ifc_nand_ctrl->index += mtd->writesize;
}
/* Returns the number of ECC errors for the given buffer (15 means uncorrectable) */
static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
u32 eccstat, unsigned int bufnum)
{
return (eccstat >> ((3 - bufnum % 4) * 8)) & 15;
}
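/*
 * Worked example (illustrative): each 32-bit ECCSTAT word packs the status
 * of four buffers, one byte per buffer, most significant byte first. For
 * bufnum = 5 the shift is (3 - 5 % 4) * 8 = 16, so the error count is the
 * low nibble of bits 23:16 in the second ECCSTAT register; a value of 15
 * marks an uncorrectable sector.
 */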
/*
* execute IFC NAND command and wait for it to complete
*/
static void fsl_ifc_run_command(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
u32 eccstat;
int i;
/* set the chip select for NAND Transaction */
ifc_out32(priv->bank << IFC_NAND_CSEL_SHIFT,
&ifc->ifc_nand.nand_csel);
dev_vdbg(priv->dev,
"%s: fir0=%08x fcr0=%08x\n",
__func__,
ifc_in32(&ifc->ifc_nand.nand_fir0),
ifc_in32(&ifc->ifc_nand.nand_fcr0));
ctrl->nand_stat = 0;
/* start read/write seq */
ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
/* wait for command complete flag or timeout */
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
msecs_to_jiffies(IFC_TIMEOUT_MSECS));
/* ctrl->nand_stat will be updated from IRQ context */
if (!ctrl->nand_stat)
dev_err(priv->dev, "Controller is not responding\n");
if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_FTOER)
dev_err(priv->dev, "NAND Flash Timeout Error\n");
if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
dev_err(priv->dev, "NAND Flash Write Protect Error\n");
nctrl->max_bitflips = 0;
if (nctrl->eccread) {
int errors;
int bufnum = nctrl->page & priv->bufnum_mask;
int sector_start = bufnum * chip->ecc.steps;
int sector_end = sector_start + chip->ecc.steps - 1;
__be32 __iomem *eccstat_regs;
eccstat_regs = ifc->ifc_nand.nand_eccstat;
eccstat = ifc_in32(&eccstat_regs[sector_start / 4]);
for (i = sector_start; i <= sector_end; i++) {
if (i != sector_start && !(i % 4))
eccstat = ifc_in32(&eccstat_regs[i / 4]);
errors = check_read_ecc(mtd, ctrl, eccstat, i);
if (errors == 15) {
/*
* Uncorrectable error.
* We'll check for blank pages later.
*
* We disable ECCER reporting due to erratum
* IFC-A002770 -- so report it now if we see an
* uncorrectable error in ECCSTAT.
*/
ctrl->nand_stat |= IFC_NAND_EVTER_STAT_ECCER;
continue;
}
mtd->ecc_stats.corrected += errors;
nctrl->max_bitflips = max_t(unsigned int,
nctrl->max_bitflips,
errors);
}
nctrl->eccread = 0;
}
}
static void fsl_ifc_do_read(struct nand_chip *chip,
int oob,
struct mtd_info *mtd)
{
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
/* Program FIR/IFC_NAND_FCR0 for Small/Large page */
if (mtd->writesize > 512) {
ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
(IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
(IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
&ifc->ifc_nand.nand_fir0);
ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
ifc_out32((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
(NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
&ifc->ifc_nand.nand_fcr0);
} else {
ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
(IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
&ifc->ifc_nand.nand_fir0);
ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
if (oob)
ifc_out32(NAND_CMD_READOOB <<
IFC_NAND_FCR0_CMD0_SHIFT,
&ifc->ifc_nand.nand_fcr0);
else
ifc_out32(NAND_CMD_READ0 <<
IFC_NAND_FCR0_CMD0_SHIFT,
&ifc->ifc_nand.nand_fcr0);
}
}
/* cmdfunc send commands to the IFC NAND Machine */
static void fsl_ifc_cmdfunc(struct nand_chip *chip, unsigned int command,
int column, int page_addr) {
struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
/* clear the read buffer */
ifc_nand_ctrl->read_bytes = 0;
if (command != NAND_CMD_PAGEPROG)
ifc_nand_ctrl->index = 0;
switch (command) {
/* READ0: read the entire buffer to use hardware ECC. */
case NAND_CMD_READ0:
ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, page_addr, 0);
ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
ifc_nand_ctrl->index += column;
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
ifc_nand_ctrl->eccread = 1;
fsl_ifc_do_read(chip, 0, mtd);
fsl_ifc_run_command(mtd);
return;
/* READOOB reads only the OOB because no ECC is performed. */
case NAND_CMD_READOOB:
ifc_out32(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, column, page_addr, 1);
ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
fsl_ifc_do_read(chip, 1, mtd);
fsl_ifc_run_command(mtd);
return;
case NAND_CMD_READID:
case NAND_CMD_PARAM: {
/*
* For READID, read the 8 ID bytes that are currently used.
* For PARAM, read all 3 copies of the 256-byte parameter page.
*/
int len = 8;
int timing = IFC_FIR_OP_RB;
if (command == NAND_CMD_PARAM) {
timing = IFC_FIR_OP_RBCD;
len = 256 * 3;
}
ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
(timing << IFC_NAND_FIR0_OP2_SHIFT),
&ifc->ifc_nand.nand_fir0);
ifc_out32(command << IFC_NAND_FCR0_CMD0_SHIFT,
&ifc->ifc_nand.nand_fcr0);
ifc_out32(column, &ifc->ifc_nand.row3);
ifc_out32(len, &ifc->ifc_nand.nand_fbcr);
ifc_nand_ctrl->read_bytes = len;
set_addr(mtd, 0, 0, 0);
fsl_ifc_run_command(mtd);
return;
}
/* ERASE1 stores the block and page address */
case NAND_CMD_ERASE1:
set_addr(mtd, 0, page_addr, 0);
return;
/* ERASE2 uses the block and page address from ERASE1 */
case NAND_CMD_ERASE2:
ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
(IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
&ifc->ifc_nand.nand_fir0);
ifc_out32((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
(NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
&ifc->ifc_nand.nand_fcr0);
ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
ifc_nand_ctrl->read_bytes = 0;
fsl_ifc_run_command(mtd);
return;
/* SEQIN sets up the addr buffer and all registers except the length */
case NAND_CMD_SEQIN: {
u32 nand_fcr0;
ifc_nand_ctrl->column = column;
ifc_nand_ctrl->oob = 0;
if (mtd->writesize > 512) {
nand_fcr0 =
(NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
ifc_out32(
(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
&ifc->ifc_nand.nand_fir0);
ifc_out32(
(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
(IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP6_SHIFT) |
(IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
&ifc->ifc_nand.nand_fir1);
} else {
nand_fcr0 = ((NAND_CMD_PAGEPROG <<
IFC_NAND_FCR0_CMD1_SHIFT) |
(NAND_CMD_SEQIN <<
IFC_NAND_FCR0_CMD2_SHIFT) |
(NAND_CMD_STATUS <<
IFC_NAND_FCR0_CMD3_SHIFT));
ifc_out32(
(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
&ifc->ifc_nand.nand_fir0);
ifc_out32(
(IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
(IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
(IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP7_SHIFT) |
(IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
&ifc->ifc_nand.nand_fir1);
if (column >= mtd->writesize)
nand_fcr0 |=
NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT;
else
nand_fcr0 |=
NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT;
}
if (column >= mtd->writesize) {
/* OOB area --> READOOB */
column -= mtd->writesize;
ifc_nand_ctrl->oob = 1;
}
ifc_out32(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
return;
}
/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
case NAND_CMD_PAGEPROG: {
if (ifc_nand_ctrl->oob) {
ifc_out32(ifc_nand_ctrl->index -
ifc_nand_ctrl->column,
&ifc->ifc_nand.nand_fbcr);
} else {
ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
}
fsl_ifc_run_command(mtd);
return;
}
case NAND_CMD_STATUS: {
void __iomem *addr;
ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
&ifc->ifc_nand.nand_fir0);
ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
&ifc->ifc_nand.nand_fcr0);
ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, 0, 0);
ifc_nand_ctrl->read_bytes = 1;
fsl_ifc_run_command(mtd);
/*
* The chip always seems to report that it is
* write-protected, even when it is not.
*/
addr = ifc_nand_ctrl->addr;
if (chip->options & NAND_BUSWIDTH_16)
ifc_out16(ifc_in16(addr) | (NAND_STATUS_WP), addr);
else
ifc_out8(ifc_in8(addr) | (NAND_STATUS_WP), addr);
return;
}
case NAND_CMD_RESET:
ifc_out32(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
&ifc->ifc_nand.nand_fir0);
ifc_out32(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
&ifc->ifc_nand.nand_fcr0);
fsl_ifc_run_command(mtd);
return;
default:
dev_err(priv->dev, "%s: error, unsupported command 0x%x.\n",
__func__, command);
}
}
static void fsl_ifc_select_chip(struct nand_chip *chip, int cs)
{
/* The hardware does not seem to support multiple
* chips per bank.
*/
}
/*
* Write buf to the IFC NAND Controller Data Buffer
*/
static void fsl_ifc_write_buf(struct nand_chip *chip, const u8 *buf, int len)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
unsigned int bufsize = mtd->writesize + mtd->oobsize;
if (len <= 0) {
dev_err(priv->dev, "%s: len %d bytes", __func__, len);
return;
}
if ((unsigned int)len > bufsize - ifc_nand_ctrl->index) {
dev_err(priv->dev,
"%s: beyond end of buffer (%d requested, %u available)\n",
__func__, len, bufsize - ifc_nand_ctrl->index);
len = bufsize - ifc_nand_ctrl->index;
}
memcpy_toio(ifc_nand_ctrl->addr + ifc_nand_ctrl->index, buf, len);
ifc_nand_ctrl->index += len;
}
/*
* Read a byte from the IFC hardware buffer:
* read function for 8-bit bus width.
*/
static uint8_t fsl_ifc_read_byte(struct nand_chip *chip)
{
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
unsigned int offset;
/*
* If there are still bytes in the IFC buffer, then use the
* next byte.
*/
if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
offset = ifc_nand_ctrl->index++;
return ifc_in8(ifc_nand_ctrl->addr + offset);
}
dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
return ERR_BYTE;
}
/*
* Read two bytes from the IFC hardware buffer:
* read function for 16-bit bus width.
*/
static uint8_t fsl_ifc_read_byte16(struct nand_chip *chip)
{
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
uint16_t data;
/*
* If there are still bytes in the IFC buffer, then use the
* next byte.
*/
if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
data = ifc_in16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
ifc_nand_ctrl->index += 2;
return (uint8_t) data;
}
dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
return ERR_BYTE;
}
/*
* Read from the IFC Controller Data Buffer
*/
static void fsl_ifc_read_buf(struct nand_chip *chip, u8 *buf, int len)
{
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
int avail;
if (len < 0) {
dev_err(priv->dev, "%s: len %d bytes", __func__, len);
return;
}
avail = min((unsigned int)len,
ifc_nand_ctrl->read_bytes - ifc_nand_ctrl->index);
memcpy_fromio(buf, ifc_nand_ctrl->addr + ifc_nand_ctrl->index, avail);
ifc_nand_ctrl->index += avail;
if (len > avail)
dev_err(priv->dev,
"%s: beyond end of buffer (%d requested, %d available)\n",
__func__, len, avail);
}
/*
* This function is called after Program and Erase Operations to
* check for success or failure.
*/
static int fsl_ifc_wait(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
u32 nand_fsr;
int status;
/* Use READ_STATUS command, but wait for the device to be ready */
ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
&ifc->ifc_nand.nand_fir0);
ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
&ifc->ifc_nand.nand_fcr0);
ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, 0, 0);
ifc_nand_ctrl->read_bytes = 1;
fsl_ifc_run_command(mtd);
nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
status = nand_fsr >> 24;
/*
* The chip always seems to report that it is
* write-protected, even when it is not.
*/
return status | NAND_STATUS_WP;
}
/*
* The controller does not check for bitflips in erased pages,
* therefore software must check instead.
*/
static int check_erased_page(struct nand_chip *chip, u8 *buf)
{
struct mtd_info *mtd = nand_to_mtd(chip);
u8 *ecc = chip->oob_poi;
const int ecc_size = chip->ecc.bytes;
const int pkt_size = chip->ecc.size;
int i, res, bitflips = 0;
struct mtd_oob_region oobregion = { };
mtd_ooblayout_ecc(mtd, 0, &oobregion);
ecc += oobregion.offset;
for (i = 0; i < chip->ecc.steps; ++i) {
res = nand_check_erased_ecc_chunk(buf, pkt_size, ecc, ecc_size,
NULL, 0,
chip->ecc.strength);
if (res < 0)
mtd->ecc_stats.failed++;
else
mtd->ecc_stats.corrected += res;
bitflips = max(res, bitflips);
buf += pkt_size;
ecc += ecc_size;
}
return bitflips;
}
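/*
 * Note (illustrative): nand_check_erased_ecc_chunk() returns the number of
 * bitflips found in an otherwise erased (all 0xff) chunk, or a negative
 * error code when too many bits are cleared for the chunk to be considered
 * erased, which is why a negative result counts as an ECC failure above.
 */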
static int fsl_ifc_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
fsl_ifc_read_buf(chip, chip->oob_poi, mtd->oobsize);
if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER) {
if (!oob_required)
fsl_ifc_read_buf(chip, chip->oob_poi, mtd->oobsize);
return check_erased_page(chip, buf);
}
if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
mtd->ecc_stats.failed++;
return nctrl->max_bitflips;
}
/* ECC will be calculated automatically, and errors will be detected in
* waitfunc.
*/
static int fsl_ifc_write_page(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
fsl_ifc_write_buf(chip, chip->oob_poi, mtd->oobsize);
return nand_prog_page_end_op(chip);
}
static int fsl_ifc_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
u32 csor;
csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);
/* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
if (csor & CSOR_NAND_ECC_DEC_EN) {
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops);
/* Hardware generates ECC per 512 Bytes */
chip->ecc.size = 512;
if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) {
chip->ecc.bytes = 8;
chip->ecc.strength = 4;
} else {
chip->ecc.bytes = 16;
chip->ecc.strength = 8;
}
} else {
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
}
dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
nanddev_ntargets(&chip->base));
dev_dbg(priv->dev, "%s: nand->chipsize = %lld\n", __func__,
nanddev_target_size(&chip->base));
dev_dbg(priv->dev, "%s: nand->pagemask = %8x\n", __func__,
chip->pagemask);
dev_dbg(priv->dev, "%s: nand->legacy.chip_delay = %d\n", __func__,
chip->legacy.chip_delay);
dev_dbg(priv->dev, "%s: nand->badblockpos = %d\n", __func__,
chip->badblockpos);
dev_dbg(priv->dev, "%s: nand->chip_shift = %d\n", __func__,
chip->chip_shift);
dev_dbg(priv->dev, "%s: nand->page_shift = %d\n", __func__,
chip->page_shift);
dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
chip->phys_erase_shift);
dev_dbg(priv->dev, "%s: nand->ecc.engine_type = %d\n", __func__,
chip->ecc.engine_type);
dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
chip->ecc.steps);
dev_dbg(priv->dev, "%s: nand->ecc.bytes = %d\n", __func__,
chip->ecc.bytes);
dev_dbg(priv->dev, "%s: nand->ecc.total = %d\n", __func__,
chip->ecc.total);
dev_dbg(priv->dev, "%s: mtd->ooblayout = %p\n", __func__,
mtd->ooblayout);
dev_dbg(priv->dev, "%s: mtd->flags = %08x\n", __func__, mtd->flags);
dev_dbg(priv->dev, "%s: mtd->size = %lld\n", __func__, mtd->size);
dev_dbg(priv->dev, "%s: mtd->erasesize = %d\n", __func__,
mtd->erasesize);
dev_dbg(priv->dev, "%s: mtd->writesize = %d\n", __func__,
mtd->writesize);
dev_dbg(priv->dev, "%s: mtd->oobsize = %d\n", __func__,
mtd->oobsize);
return 0;
}
static const struct nand_controller_ops fsl_ifc_controller_ops = {
.attach_chip = fsl_ifc_attach_chip,
};
static int fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
{
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
uint32_t cs = priv->bank;
if (ctrl->version < FSL_IFC_VERSION_1_1_0)
return 0;
if (ctrl->version > FSL_IFC_VERSION_1_1_0) {
u32 ncfgr, status;
int ret;
/* Trigger auto initialization */
ncfgr = ifc_in32(&ifc_runtime->ifc_nand.ncfgr);
ifc_out32(ncfgr | IFC_NAND_NCFGR_SRAM_INIT_EN, &ifc_runtime->ifc_nand.ncfgr);
/* Wait until done */
ret = readx_poll_timeout(ifc_in32, &ifc_runtime->ifc_nand.ncfgr,
status, !(status & IFC_NAND_NCFGR_SRAM_INIT_EN),
10, IFC_TIMEOUT_MSECS * 1000);
if (ret)
dev_err(priv->dev, "Failed to initialize SRAM!\n");
return ret;
}
/* Save CSOR and CSOR_ext */
csor = ifc_in32(&ifc_global->csor_cs[cs].csor);
csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext);
/* Change page size to 8K and spare size to 1K */
csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor);
ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext);
/* READID */
ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
(IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
&ifc_runtime->ifc_nand.nand_fir0);
ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
&ifc_runtime->ifc_nand.nand_fcr0);
ifc_out32(0x0, &ifc_runtime->ifc_nand.row3);
ifc_out32(0x0, &ifc_runtime->ifc_nand.nand_fbcr);
/* Program ROW0/COL0 */
ifc_out32(0x0, &ifc_runtime->ifc_nand.row0);
ifc_out32(0x0, &ifc_runtime->ifc_nand.col0);
/* set the chip select for NAND Transaction */
ifc_out32(cs << IFC_NAND_CSEL_SHIFT,
&ifc_runtime->ifc_nand.nand_csel);
/* start read seq */
ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT,
&ifc_runtime->ifc_nand.nandseq_strt);
/* wait for command complete flag or timeout */
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
msecs_to_jiffies(IFC_TIMEOUT_MSECS));
if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC) {
pr_err("fsl-ifc: Failed to Initialise SRAM\n");
return -ETIMEDOUT;
}
/* Restore CSOR and CSOR_ext */
ifc_out32(csor, &ifc_global->csor_cs[cs].csor);
ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext);
return 0;
}
static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
{
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
struct nand_chip *chip = &priv->chip;
struct mtd_info *mtd = nand_to_mtd(&priv->chip);
u32 csor;
int ret;
/* Fill in fsl_ifc_mtd structure */
mtd->dev.parent = priv->dev;
nand_set_flash_node(chip, priv->dev->of_node);
/* fill in nand_chip structure */
/* set up function call table */
if ((ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr))
& CSPR_PORT_SIZE_16)
chip->legacy.read_byte = fsl_ifc_read_byte16;
else
chip->legacy.read_byte = fsl_ifc_read_byte;
chip->legacy.write_buf = fsl_ifc_write_buf;
chip->legacy.read_buf = fsl_ifc_read_buf;
chip->legacy.select_chip = fsl_ifc_select_chip;
chip->legacy.cmdfunc = fsl_ifc_cmdfunc;
chip->legacy.waitfunc = fsl_ifc_wait;
chip->legacy.set_features = nand_get_set_features_notsupp;
chip->legacy.get_features = nand_get_set_features_notsupp;
chip->bbt_td = &bbt_main_descr;
chip->bbt_md = &bbt_mirror_descr;
ifc_out32(0x0, &ifc_runtime->ifc_nand.ncfgr);
/* set up nand options */
chip->bbt_options = NAND_BBT_USE_FLASH;
chip->options = NAND_NO_SUBPAGE_WRITE;
if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)
& CSPR_PORT_SIZE_16) {
chip->legacy.read_byte = fsl_ifc_read_byte16;
chip->options |= NAND_BUSWIDTH_16;
} else {
chip->legacy.read_byte = fsl_ifc_read_byte;
}
chip->controller = &ifc_nand_ctrl->controller;
nand_set_controller_data(chip, priv);
chip->ecc.read_page = fsl_ifc_read_page;
chip->ecc.write_page = fsl_ifc_write_page;
csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);
switch (csor & CSOR_NAND_PGS_MASK) {
case CSOR_NAND_PGS_512:
if (!(chip->options & NAND_BUSWIDTH_16)) {
/* Avoid conflict with bad block marker */
bbt_main_descr.offs = 0;
bbt_mirror_descr.offs = 0;
}
priv->bufnum_mask = 15;
break;
case CSOR_NAND_PGS_2K:
priv->bufnum_mask = 3;
break;
case CSOR_NAND_PGS_4K:
priv->bufnum_mask = 1;
break;
case CSOR_NAND_PGS_8K:
priv->bufnum_mask = 0;
break;
default:
dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
return -ENODEV;
}
ret = fsl_ifc_sram_init(priv);
if (ret)
return ret;
/*
* IFC version 2.0.0 has 16KB of internal SRAM, compared to 8KB on older
* versions, so the bufnum mask needs to be updated.
*/
if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
priv->bufnum_mask = (priv->bufnum_mask * 2) + 1;
return 0;
}
static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
{
struct mtd_info *mtd = nand_to_mtd(&priv->chip);
kfree(mtd->name);
if (priv->vbase)
iounmap(priv->vbase);
ifc_nand_ctrl->chips[priv->bank] = NULL;
return 0;
}
static int match_bank(struct fsl_ifc_global __iomem *ifc_global, int bank,
phys_addr_t addr)
{
u32 cspr = ifc_in32(&ifc_global->cspr_cs[bank].cspr);
if (!(cspr & CSPR_V))
return 0;
if ((cspr & CSPR_MSEL) != CSPR_MSEL_NAND)
return 0;
return (cspr & CSPR_BA) == convert_ifc_address(addr);
}
static DEFINE_MUTEX(fsl_ifc_nand_mutex);
static int fsl_ifc_nand_probe(struct platform_device *dev)
{
struct fsl_ifc_runtime __iomem *ifc;
struct fsl_ifc_mtd *priv;
struct resource res;
static const char *part_probe_types[]
= { "cmdlinepart", "RedBoot", "ofpart", NULL };
int ret;
int bank;
struct device_node *node = dev->dev.of_node;
struct mtd_info *mtd;
if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->rregs)
return -ENODEV;
ifc = fsl_ifc_ctrl_dev->rregs;
/* get, allocate and map the memory resource */
ret = of_address_to_resource(node, 0, &res);
if (ret) {
dev_err(&dev->dev, "%s: failed to get resource\n", __func__);
return ret;
}
/* find which chip select it is connected to */
for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) {
if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start))
break;
}
if (bank >= fsl_ifc_ctrl_dev->banks) {
dev_err(&dev->dev, "%s: address did not match any chip selects\n",
__func__);
return -ENODEV;
}
priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
mutex_lock(&fsl_ifc_nand_mutex);
if (!fsl_ifc_ctrl_dev->nand) {
ifc_nand_ctrl = kzalloc(sizeof(*ifc_nand_ctrl), GFP_KERNEL);
if (!ifc_nand_ctrl) {
mutex_unlock(&fsl_ifc_nand_mutex);
return -ENOMEM;
}
ifc_nand_ctrl->read_bytes = 0;
ifc_nand_ctrl->index = 0;
ifc_nand_ctrl->addr = NULL;
fsl_ifc_ctrl_dev->nand = ifc_nand_ctrl;
nand_controller_init(&ifc_nand_ctrl->controller);
} else {
ifc_nand_ctrl = fsl_ifc_ctrl_dev->nand;
}
mutex_unlock(&fsl_ifc_nand_mutex);
ifc_nand_ctrl->chips[bank] = priv;
priv->bank = bank;
priv->ctrl = fsl_ifc_ctrl_dev;
priv->dev = &dev->dev;
priv->vbase = ioremap(res.start, resource_size(&res));
if (!priv->vbase) {
dev_err(priv->dev, "%s: failed to map chip region\n", __func__);
ret = -ENOMEM;
goto err;
}
dev_set_drvdata(priv->dev, priv);
ifc_out32(IFC_NAND_EVTER_EN_OPC_EN |
IFC_NAND_EVTER_EN_FTOER_EN |
IFC_NAND_EVTER_EN_WPER_EN,
&ifc->ifc_nand.nand_evter_en);
/* enable NAND Machine Interrupts */
ifc_out32(IFC_NAND_EVTER_INTR_OPCIR_EN |
IFC_NAND_EVTER_INTR_FTOERIR_EN |
IFC_NAND_EVTER_INTR_WPERIR_EN,
&ifc->ifc_nand.nand_evter_intr_en);
mtd = nand_to_mtd(&priv->chip);
mtd->name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
if (!mtd->name) {
ret = -ENOMEM;
goto err;
}
ret = fsl_ifc_chip_init(priv);
if (ret)
goto err;
priv->chip.controller->ops = &fsl_ifc_controller_ops;
ret = nand_scan(&priv->chip, 1);
if (ret)
goto err;
/* First look for a RedBoot table or partitions on the command
* line; these take precedence over device tree information. */
ret = mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
if (ret)
goto cleanup_nand;
dev_info(priv->dev, "IFC NAND device at 0x%llx, bank %d\n",
(unsigned long long)res.start, priv->bank);
return 0;
cleanup_nand:
nand_cleanup(&priv->chip);
err:
fsl_ifc_chip_remove(priv);
return ret;
}
static void fsl_ifc_nand_remove(struct platform_device *dev)
{
struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
struct nand_chip *chip = &priv->chip;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
fsl_ifc_chip_remove(priv);
mutex_lock(&fsl_ifc_nand_mutex);
ifc_nand_ctrl->counter--;
if (!ifc_nand_ctrl->counter) {
fsl_ifc_ctrl_dev->nand = NULL;
kfree(ifc_nand_ctrl);
}
mutex_unlock(&fsl_ifc_nand_mutex);
}
static const struct of_device_id fsl_ifc_nand_match[] = {
{
.compatible = "fsl,ifc-nand",
},
{}
};
MODULE_DEVICE_TABLE(of, fsl_ifc_nand_match);
static struct platform_driver fsl_ifc_nand_driver = {
.driver = {
.name = "fsl,ifc-nand",
.of_match_table = fsl_ifc_nand_match,
},
.probe = fsl_ifc_nand_probe,
.remove_new = fsl_ifc_nand_remove,
};
module_platform_driver(fsl_ifc_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Freescale");
MODULE_DESCRIPTION("Freescale Integrated Flash Controller MTD NAND driver");
| linux-master | drivers/mtd/nand/raw/fsl_ifc_nand.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2004-2008 Freescale Semiconductor, Inc.
* Copyright 2009 Semihalf.
*
* Approved as OSADL project by a majority of OSADL members and funded
* by OSADL membership fees in 2009; for details see www.osadl.org.
*
* Based on original driver from Freescale Semiconductor
* written by John Rigby <[email protected]> on basis of mxc_nand.c.
* Reworked and extended by Piotr Ziecik <[email protected]>.
*/
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <asm/mpc5121.h>
/* Addresses for NFC MAIN RAM BUFFER areas */
#define NFC_MAIN_AREA(n) ((n) * 0x200)
/* Addresses for NFC SPARE BUFFER areas */
#define NFC_SPARE_BUFFERS 8
#define NFC_SPARE_LEN 0x40
#define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN))
/* MPC5121 NFC registers */
#define NFC_BUF_ADDR 0x1E04
#define NFC_FLASH_ADDR 0x1E06
#define NFC_FLASH_CMD 0x1E08
#define NFC_CONFIG 0x1E0A
#define NFC_ECC_STATUS1 0x1E0C
#define NFC_ECC_STATUS2 0x1E0E
#define NFC_SPAS 0x1E10
#define NFC_WRPROT 0x1E12
#define NFC_NF_WRPRST 0x1E18
#define NFC_CONFIG1 0x1E1A
#define NFC_CONFIG2 0x1E1C
#define NFC_UNLOCKSTART_BLK0 0x1E20
#define NFC_UNLOCKEND_BLK0 0x1E22
#define NFC_UNLOCKSTART_BLK1 0x1E24
#define NFC_UNLOCKEND_BLK1 0x1E26
#define NFC_UNLOCKSTART_BLK2 0x1E28
#define NFC_UNLOCKEND_BLK2 0x1E2A
#define NFC_UNLOCKSTART_BLK3 0x1E2C
#define NFC_UNLOCKEND_BLK3 0x1E2E
/* Bit Definitions: NFC_BUF_ADDR */
#define NFC_RBA_MASK (7 << 0)
#define NFC_ACTIVE_CS_SHIFT 5
#define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT)
/* Bit Definitions: NFC_CONFIG */
#define NFC_BLS_UNLOCKED (1 << 1)
/* Bit Definitions: NFC_CONFIG1 */
#define NFC_ECC_4BIT (1 << 0)
#define NFC_FULL_PAGE_DMA (1 << 1)
#define NFC_SPARE_ONLY (1 << 2)
#define NFC_ECC_ENABLE (1 << 3)
#define NFC_INT_MASK (1 << 4)
#define NFC_BIG_ENDIAN (1 << 5)
#define NFC_RESET (1 << 6)
#define NFC_CE (1 << 7)
#define NFC_ONE_CYCLE (1 << 8)
#define NFC_PPB_32 (0 << 9)
#define NFC_PPB_64 (1 << 9)
#define NFC_PPB_128 (2 << 9)
#define NFC_PPB_256 (3 << 9)
#define NFC_PPB_MASK (3 << 9)
#define NFC_FULL_PAGE_INT (1 << 11)
/* Bit Definitions: NFC_CONFIG2 */
#define NFC_COMMAND (1 << 0)
#define NFC_ADDRESS (1 << 1)
#define NFC_INPUT (1 << 2)
#define NFC_OUTPUT (1 << 3)
#define NFC_ID (1 << 4)
#define NFC_STATUS (1 << 5)
#define NFC_CMD_FAIL (1 << 15)
#define NFC_INT (1 << 15)
/* Bit Definitions: NFC_WRPROT */
#define NFC_WPC_LOCK_TIGHT (1 << 0)
#define NFC_WPC_LOCK (1 << 1)
#define NFC_WPC_UNLOCK (1 << 2)
#define DRV_NAME "mpc5121_nfc"
/* Timeouts */
#define NFC_RESET_TIMEOUT 1000 /* 1 ms */
#define NFC_TIMEOUT (HZ / 10) /* 1/10 s */
struct mpc5121_nfc_prv {
struct nand_controller controller;
struct nand_chip chip;
int irq;
void __iomem *regs;
struct clk *clk;
wait_queue_head_t irq_waitq;
uint column;
int spareonly;
void __iomem *csreg;
struct device *dev;
};
static void mpc5121_nfc_done(struct mtd_info *mtd);
/* Read NFC register */
static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
return in_be16(prv->regs + reg);
}
/* Write NFC register */
static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
out_be16(prv->regs + reg, val);
}
/* Set bits in NFC register */
static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits)
{
nfc_write(mtd, reg, nfc_read(mtd, reg) | bits);
}
/* Clear bits in NFC register */
static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits)
{
nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits);
}
/* Invoke address cycle */
static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr)
{
nfc_write(mtd, NFC_FLASH_ADDR, addr);
nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS);
mpc5121_nfc_done(mtd);
}
/* Invoke command cycle */
static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd)
{
nfc_write(mtd, NFC_FLASH_CMD, cmd);
nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND);
mpc5121_nfc_done(mtd);
}
/* Send data from NFC buffers to NAND flash */
static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd)
{
nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
nfc_write(mtd, NFC_CONFIG2, NFC_INPUT);
mpc5121_nfc_done(mtd);
}
/* Receive data from NAND flash */
static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd)
{
nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT);
mpc5121_nfc_done(mtd);
}
/* Receive ID from NAND flash */
static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd)
{
nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
nfc_write(mtd, NFC_CONFIG2, NFC_ID);
mpc5121_nfc_done(mtd);
}
/* Receive status from NAND flash */
static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd)
{
nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
nfc_write(mtd, NFC_CONFIG2, NFC_STATUS);
mpc5121_nfc_done(mtd);
}
/* NFC interrupt handler */
static irqreturn_t mpc5121_nfc_irq(int irq, void *data)
{
struct mtd_info *mtd = data;
struct nand_chip *chip = mtd_to_nand(mtd);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK);
wake_up(&prv->irq_waitq);
return IRQ_HANDLED;
}
/* Wait for operation complete */
static void mpc5121_nfc_done(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
int rv;
if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) {
nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK);
rv = wait_event_timeout(prv->irq_waitq,
(nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT);
if (!rv)
dev_warn(prv->dev,
"Timeout while waiting for interrupt.\n");
}
nfc_clear(mtd, NFC_CONFIG2, NFC_INT);
}
/* Do address cycle(s) */
static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
{
struct nand_chip *chip = mtd_to_nand(mtd);
u32 pagemask = chip->pagemask;
if (column != -1) {
mpc5121_nfc_send_addr(mtd, column);
if (mtd->writesize > 512)
mpc5121_nfc_send_addr(mtd, column >> 8);
}
if (page != -1) {
do {
mpc5121_nfc_send_addr(mtd, page & 0xFF);
page >>= 8;
pagemask >>= 8;
} while (pagemask);
}
}
/* Control chip select signals */
static void mpc5121_nfc_select_chip(struct nand_chip *nand, int chip)
{
struct mtd_info *mtd = nand_to_mtd(nand);
if (chip < 0) {
nfc_clear(mtd, NFC_CONFIG1, NFC_CE);
return;
}
nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK);
nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) &
NFC_ACTIVE_CS_MASK);
nfc_set(mtd, NFC_CONFIG1, NFC_CE);
}
/* Init external chip select logic on ADS5121 board */
static int ads5121_chipselect_init(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
struct device_node *dn;
dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld");
if (dn) {
prv->csreg = of_iomap(dn, 0);
of_node_put(dn);
if (!prv->csreg)
return -ENOMEM;
/* CPLD Register 9 controls NAND /CE Lines */
prv->csreg += 9;
return 0;
}
return -EINVAL;
}
/* Control chip select signals on the ADS5121 board */
static void ads5121_select_chip(struct nand_chip *nand, int chip)
{
struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
u8 v;
v = in_8(prv->csreg);
v |= 0x0F;
if (chip >= 0) {
mpc5121_nfc_select_chip(nand, 0);
v &= ~(1 << chip);
} else
mpc5121_nfc_select_chip(nand, -1);
out_8(prv->csreg, v);
}
/* Read NAND Ready/Busy signal */
static int mpc5121_nfc_dev_ready(struct nand_chip *nand)
{
/*
* The NFC handles the ready/busy signal internally, so this function
* always reports the device as ready.
*/
return 1;
}
/* Write command to NAND flash */
static void mpc5121_nfc_command(struct nand_chip *chip, unsigned command,
int column, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
prv->column = (column >= 0) ? column : 0;
prv->spareonly = 0;
switch (command) {
case NAND_CMD_PAGEPROG:
mpc5121_nfc_send_prog_page(mtd);
break;
/*
* NFC does not support sub-page reads and writes,
* so emulate them using full page transfers.
*/
case NAND_CMD_READ0:
column = 0;
break;
case NAND_CMD_READ1:
prv->column += 256;
command = NAND_CMD_READ0;
column = 0;
break;
case NAND_CMD_READOOB:
prv->spareonly = 1;
command = NAND_CMD_READ0;
column = 0;
break;
case NAND_CMD_SEQIN:
mpc5121_nfc_command(chip, NAND_CMD_READ0, column, page);
column = 0;
break;
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
case NAND_CMD_READID:
case NAND_CMD_STATUS:
break;
default:
return;
}
mpc5121_nfc_send_cmd(mtd, command);
mpc5121_nfc_addr_cycle(mtd, column, page);
switch (command) {
case NAND_CMD_READ0:
if (mtd->writesize > 512)
mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART);
mpc5121_nfc_send_read_page(mtd);
break;
case NAND_CMD_READID:
mpc5121_nfc_send_read_id(mtd);
break;
case NAND_CMD_STATUS:
mpc5121_nfc_send_read_status(mtd);
if (chip->options & NAND_BUSWIDTH_16)
prv->column = 1;
else
prv->column = 0;
break;
}
}
/* Copy data from/to NFC spare buffers. */
static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
u8 *buffer, uint size, int wr)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
uint o, s, sbsize, blksize;
/*
* NAND spare area is available through NFC spare buffers.
* The NFC divides spare area into (page_size / 512) chunks.
* Each chunk is placed into a separate spare memory area, using the
* first (spare_size / num_of_chunks) bytes of the buffer.
*
* For NAND devices whose spare area is not divided evenly by the
* number of chunks, the number of used bytes in each spare buffer is
* rounded down to the nearest even number of bytes, and all remaining
* bytes are added to the last used spare area.
*
* For more information read section 26.6.10 of MPC5121e
* Microcontroller Reference Manual, Rev. 3.
*/
/* Calculate number of valid bytes in each spare buffer */
sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1;
while (size) {
/* Calculate spare buffer number */
s = offset / sbsize;
if (s > NFC_SPARE_BUFFERS - 1)
s = NFC_SPARE_BUFFERS - 1;
/*
* Calculate offset to requested data block in selected spare
* buffer and its size.
*/
o = offset - (s * sbsize);
blksize = min(sbsize - o, size);
if (wr)
memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o,
buffer, blksize);
else
memcpy_fromio(buffer,
prv->regs + NFC_SPARE_AREA(s) + o, blksize);
buffer += blksize;
offset += blksize;
size -= blksize;
}
}
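/*
 * Worked example (illustrative): for a 2048-byte page with a 64-byte spare
 * area the NFC uses 2048 / 512 = 4 chunks, so sbsize = (64 / 4) & ~1 = 16
 * and an access at spare offset 40 lands in spare buffer 2 at offset 8.
 */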
/* Copy data from/to NFC main and spare buffers */
static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len,
int wr)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
uint c = prv->column;
uint l;
/* Handle spare area access */
if (prv->spareonly || c >= mtd->writesize) {
/* Calculate offset from beginning of spare area */
if (c >= mtd->writesize)
c -= mtd->writesize;
prv->column += len;
mpc5121_nfc_copy_spare(mtd, c, buf, len, wr);
return;
}
/*
* Handle main area access - limit copy length to prevent
* crossing main/spare boundary.
*/
l = min((uint)len, mtd->writesize - c);
prv->column += l;
if (wr)
memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l);
else
memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l);
/* Handle crossing main/spare boundary */
if (l != len) {
buf += l;
len -= l;
mpc5121_nfc_buf_copy(mtd, buf, len, wr);
}
}
/* Read data from NFC buffers */
static void mpc5121_nfc_read_buf(struct nand_chip *chip, u_char *buf, int len)
{
mpc5121_nfc_buf_copy(nand_to_mtd(chip), buf, len, 0);
}
/* Write data to NFC buffers */
static void mpc5121_nfc_write_buf(struct nand_chip *chip, const u_char *buf,
int len)
{
mpc5121_nfc_buf_copy(nand_to_mtd(chip), (u_char *)buf, len, 1);
}
/* Read byte from NFC buffers */
static u8 mpc5121_nfc_read_byte(struct nand_chip *chip)
{
u8 tmp;
mpc5121_nfc_read_buf(chip, &tmp, sizeof(tmp));
return tmp;
}
/*
* Read NFC configuration from Reset Config Word
*
* NFC is configured during reset on the basis of information stored in
* the Reset Config Word. There is no other way to set the NAND page
* size, spare size and bus width.
*/
static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
struct mpc512x_reset_module *rm;
struct device_node *rmnode;
uint rcw_pagesize = 0;
uint rcw_sparesize = 0;
uint rcw_width;
uint rcwh;
uint romloc, ps;
int ret = 0;
rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset");
if (!rmnode) {
dev_err(prv->dev, "Missing 'fsl,mpc5121-reset' "
"node in device tree!\n");
return -ENODEV;
}
rm = of_iomap(rmnode, 0);
if (!rm) {
dev_err(prv->dev, "Error mapping reset module node!\n");
ret = -EBUSY;
goto out;
}
rcwh = in_be32(&rm->rcwhr);
/* Bit 6: NFC bus width */
rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1;
/* Bit 7: NFC Page/Spare size */
ps = (rcwh >> 7) & 0x1;
/* Bits [22:21]: ROM Location */
romloc = (rcwh >> 21) & 0x3;
/* Decode RCW bits */
switch ((ps << 2) | romloc) {
case 0x00:
case 0x01:
rcw_pagesize = 512;
rcw_sparesize = 16;
break;
case 0x02:
case 0x03:
rcw_pagesize = 4096;
rcw_sparesize = 128;
break;
case 0x04:
case 0x05:
rcw_pagesize = 2048;
rcw_sparesize = 64;
break;
case 0x06:
case 0x07:
rcw_pagesize = 4096;
rcw_sparesize = 218;
break;
}
mtd->writesize = rcw_pagesize;
mtd->oobsize = rcw_sparesize;
if (rcw_width == 2)
chip->options |= NAND_BUSWIDTH_16;
dev_notice(prv->dev, "Configured for "
"%u-bit NAND, page size %u "
"with %u spare.\n",
rcw_width * 8, rcw_pagesize,
rcw_sparesize);
iounmap(rm);
out:
of_node_put(rmnode);
return ret;
}
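/*
 * Worked example (illustrative): with RCWH bit 7 (page/spare size) = 0 and
 * ROM location bits [22:21] = 0x2, the switch above decodes
 * (0 << 2) | 0x2 = 0x02, i.e. a 4096-byte page with a 128-byte spare area.
 */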
/* Free driver resources */
static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
if (prv->csreg)
iounmap(prv->csreg);
}
static int mpc5121_nfc_attach_chip(struct nand_chip *chip)
{
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
static const struct nand_controller_ops mpc5121_nfc_ops = {
.attach_chip = mpc5121_nfc_attach_chip,
};
static int mpc5121_nfc_probe(struct platform_device *op)
{
struct device_node *dn = op->dev.of_node;
struct clk *clk;
struct device *dev = &op->dev;
struct mpc5121_nfc_prv *prv;
struct resource res;
struct mtd_info *mtd;
struct nand_chip *chip;
unsigned long regs_paddr, regs_size;
const __be32 *chips_no;
int resettime = 0;
int retval = 0;
int rev, len;
/*
* Check SoC revision. This driver supports only NFC
* in MPC5121 revision 2 and MPC5123 revision 3.
*/
rev = (mfspr(SPRN_SVR) >> 4) & 0xF;
if ((rev != 2) && (rev != 3)) {
dev_err(dev, "SoC revision %u is not supported!\n", rev);
return -ENXIO;
}
prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
if (!prv)
return -ENOMEM;
chip = &prv->chip;
mtd = nand_to_mtd(chip);
nand_controller_init(&prv->controller);
prv->controller.ops = &mpc5121_nfc_ops;
chip->controller = &prv->controller;
mtd->dev.parent = dev;
nand_set_controller_data(chip, prv);
nand_set_flash_node(chip, dn);
prv->dev = dev;
/* Read NFC configuration from Reset Config Word */
retval = mpc5121_nfc_read_hw_config(mtd);
if (retval) {
dev_err(dev, "Unable to read NFC config!\n");
return retval;
}
prv->irq = irq_of_parse_and_map(dn, 0);
if (!prv->irq) {
dev_err(dev, "Error mapping IRQ!\n");
return -EINVAL;
}
retval = of_address_to_resource(dn, 0, &res);
if (retval) {
dev_err(dev, "Error parsing memory region!\n");
return retval;
}
chips_no = of_get_property(dn, "chips", &len);
if (!chips_no || len != sizeof(*chips_no)) {
dev_err(dev, "Invalid/missing 'chips' property!\n");
return -EINVAL;
}
regs_paddr = res.start;
regs_size = resource_size(&res);
if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) {
dev_err(dev, "Error requesting memory region!\n");
return -EBUSY;
}
prv->regs = devm_ioremap(dev, regs_paddr, regs_size);
if (!prv->regs) {
dev_err(dev, "Error mapping memory region!\n");
return -ENOMEM;
}
mtd->name = "MPC5121 NAND";
chip->legacy.dev_ready = mpc5121_nfc_dev_ready;
chip->legacy.cmdfunc = mpc5121_nfc_command;
chip->legacy.read_byte = mpc5121_nfc_read_byte;
chip->legacy.read_buf = mpc5121_nfc_read_buf;
chip->legacy.write_buf = mpc5121_nfc_write_buf;
chip->legacy.select_chip = mpc5121_nfc_select_chip;
chip->legacy.set_features = nand_get_set_features_notsupp;
chip->legacy.get_features = nand_get_set_features_notsupp;
chip->bbt_options = NAND_BBT_USE_FLASH;
/* Support external chip-select logic on ADS5121 board */
if (of_machine_is_compatible("fsl,mpc5121ads")) {
retval = ads5121_chipselect_init(mtd);
if (retval) {
dev_err(dev, "Chipselect init error!\n");
return retval;
}
chip->legacy.select_chip = ads5121_select_chip;
}
/* Enable NFC clock */
clk = devm_clk_get_enabled(dev, "ipg");
if (IS_ERR(clk)) {
dev_err(dev, "Unable to acquire and enable NFC clock!\n");
retval = PTR_ERR(clk);
goto error;
}
prv->clk = clk;
/* Reset NAND Flash controller */
nfc_set(mtd, NFC_CONFIG1, NFC_RESET);
while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) {
if (resettime++ >= NFC_RESET_TIMEOUT) {
dev_err(dev, "Timeout while resetting NFC!\n");
retval = -EINVAL;
goto error;
}
udelay(1);
}
/* Enable write to NFC memory */
nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED);
/* Enable write to all NAND pages */
nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000);
nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF);
nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK);
/*
* Setup NFC:
* - Big Endian transfers,
* - Interrupt after full page read/write.
*/
nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK |
NFC_FULL_PAGE_INT);
/* Set spare area size */
nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1);
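/*
 * The divide by two above reflects that the SPAS register apparently counts
 * the spare area in 16-bit half-words rather than bytes.
 */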
init_waitqueue_head(&prv->irq_waitq);
retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME,
mtd);
if (retval) {
dev_err(dev, "Error requesting IRQ!\n");
goto error;
}
/*
* This driver assumes that the default ECC engine should be TYPE_SOFT.
* Set ->engine_type before registering the NAND devices in order to
* provide a driver specific default value.
*/
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
/* Detect NAND chips */
retval = nand_scan(chip, be32_to_cpup(chips_no));
if (retval) {
dev_err(dev, "NAND Flash not found !\n");
goto error;
}
/* Set erase block size */
switch (mtd->erasesize / mtd->writesize) {
case 32:
nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32);
break;
case 64:
nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64);
break;
case 128:
nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128);
break;
case 256:
nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256);
break;
default:
dev_err(dev, "Unsupported NAND flash!\n");
retval = -ENXIO;
goto error;
}
dev_set_drvdata(dev, mtd);
/* Register device in MTD */
retval = mtd_device_register(mtd, NULL, 0);
if (retval) {
dev_err(dev, "Error adding MTD device!\n");
goto error;
}
return 0;
error:
mpc5121_nfc_free(dev, mtd);
return retval;
}
static void mpc5121_nfc_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct mtd_info *mtd = dev_get_drvdata(dev);
int ret;
ret = mtd_device_unregister(mtd);
WARN_ON(ret);
nand_cleanup(mtd_to_nand(mtd));
mpc5121_nfc_free(dev, mtd);
}
static const struct of_device_id mpc5121_nfc_match[] = {
{ .compatible = "fsl,mpc5121-nfc", },
{},
};
MODULE_DEVICE_TABLE(of, mpc5121_nfc_match);
static struct platform_driver mpc5121_nfc_driver = {
.probe = mpc5121_nfc_probe,
.remove_new = mpc5121_nfc_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = mpc5121_nfc_match,
},
};
module_platform_driver(mpc5121_nfc_driver);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/nand/raw/mpc5121_nfc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 Steven J. Hill ([email protected])
* 2002-2006 Thomas Gleixner ([email protected])
*
* Credits:
* David Woodhouse for adding multichip support
*
* Aleph One Ltd. and Toby Churchill Ltd. for supporting the
* rework for 2K page size chips
*
* This file contains all ONFI helpers.
*/
#include <linux/slab.h>
#include "internals.h"
#define ONFI_PARAM_PAGES 3
u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
{
int i;
while (len--) {
crc ^= *p++ << 8;
for (i = 0; i < 8; i++)
crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
}
return crc;
}
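/*
 * Illustrative sketch (not used in this file): a parameter page copy is
 * considered valid when the CRC computed over its first 254 bytes matches the
 * little-endian CRC field stored in the page, exactly as nand_onfi_detect()
 * checks below.
 */
static inline bool onfi_param_page_crc_ok(const struct nand_onfi_params *p)
{
	return onfi_crc16(ONFI_CRC_BASE, (const u8 *)p, 254) == le16_to_cpu(p->crc);
}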
/* Parse the Extended Parameter Page. */
static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
struct nand_onfi_params *p)
{
struct nand_device *base = &chip->base;
struct nand_ecc_props requirements;
struct onfi_ext_param_page *ep;
struct onfi_ext_section *s;
struct onfi_ext_ecc_info *ecc;
uint8_t *cursor;
int ret;
int len;
int i;
len = le16_to_cpu(p->ext_param_page_length) * 16;
ep = kmalloc(len, GFP_KERNEL);
if (!ep)
return -ENOMEM;
/*
* Use the Change Read Column command to skip the ONFI param pages and
* ensure we read at the right location.
*/
ret = nand_change_read_column_op(chip,
sizeof(*p) * p->num_of_param_pages,
ep, len, true);
if (ret)
goto ext_out;
ret = -EINVAL;
if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
!= le16_to_cpu(ep->crc))) {
pr_debug("fail in the CRC.\n");
goto ext_out;
}
/*
* Check the signature.
* Do not strictly follow the ONFI spec; this may change in the future.
*/
if (strncmp(ep->sig, "EPPS", 4)) {
pr_debug("The signature is invalid.\n");
goto ext_out;
}
/* find the ECC section. */
cursor = (uint8_t *)(ep + 1);
for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
s = ep->sections + i;
if (s->type == ONFI_SECTION_TYPE_2)
break;
cursor += s->length * 16;
}
if (i == ONFI_EXT_SECTION_MAX) {
pr_debug("We can not find the ECC section.\n");
goto ext_out;
}
/* get the info we want. */
ecc = (struct onfi_ext_ecc_info *)cursor;
if (!ecc->codeword_size) {
pr_debug("Invalid codeword size\n");
goto ext_out;
}
requirements.strength = ecc->ecc_bits;
requirements.step_size = 1 << ecc->codeword_size;
nanddev_set_ecc_requirements(base, &requirements);
ret = 0;
ext_out:
kfree(ep);
return ret;
}
/*
* Recover data with bit-wise majority: for each bit position, the output bit
* is set when that bit is set in more than half of the source buffers.
*/
static void nand_bit_wise_majority(const void **srcbufs,
unsigned int nsrcbufs,
void *dstbuf,
unsigned int bufsize)
{
int i, j, k;
for (i = 0; i < bufsize; i++) {
u8 val = 0;
for (j = 0; j < 8; j++) {
unsigned int cnt = 0;
for (k = 0; k < nsrcbufs; k++) {
const u8 *srcbuf = srcbufs[k];
if (srcbuf[i] & BIT(j))
cnt++;
}
if (cnt > nsrcbufs / 2)
val |= BIT(j);
}
((u8 *)dstbuf)[i] = val;
}
}
/*
* Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
*/
int nand_onfi_detect(struct nand_chip *chip)
{
struct nand_device *base = &chip->base;
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
struct nand_onfi_params *p = NULL, *pbuf;
struct onfi_params *onfi;
bool use_datain = false;
int onfi_version = 0;
char id[4];
int i, ret, val;
u16 crc;
memorg = nanddev_get_memorg(&chip->base);
/* Try ONFI for unknown chip or LP */
ret = nand_readid_op(chip, 0x20, id, sizeof(id));
if (ret || strncmp(id, "ONFI", 4))
return 0;
/* ONFI chip: allocate a buffer to hold its parameter page */
pbuf = kzalloc((sizeof(*pbuf) * ONFI_PARAM_PAGES), GFP_KERNEL);
if (!pbuf)
return -ENOMEM;
if (!nand_has_exec_op(chip) || chip->controller->supported_op.data_only_read)
use_datain = true;
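/*
 * When use_datain is set, the extra parameter page copies below are fetched
 * with plain DATA_IN cycles instead of Change Read Column operations.
 */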
for (i = 0; i < ONFI_PARAM_PAGES; i++) {
if (!i)
ret = nand_read_param_page_op(chip, 0, &pbuf[i],
sizeof(*pbuf));
else if (use_datain)
ret = nand_read_data_op(chip, &pbuf[i], sizeof(*pbuf),
true, false);
else
ret = nand_change_read_column_op(chip, sizeof(*pbuf) * i,
&pbuf[i], sizeof(*pbuf),
true);
if (ret) {
ret = 0;
goto free_onfi_param_page;
}
crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)&pbuf[i], 254);
if (crc == le16_to_cpu(pbuf[i].crc)) {
p = &pbuf[i];
break;
}
}
if (i == ONFI_PARAM_PAGES) {
const void *srcbufs[ONFI_PARAM_PAGES];
unsigned int j;
for (j = 0; j < ONFI_PARAM_PAGES; j++)
srcbufs[j] = pbuf + j;
pr_warn("Could not find a valid ONFI parameter page, trying bit-wise majority to recover it\n");
nand_bit_wise_majority(srcbufs, ONFI_PARAM_PAGES, pbuf,
sizeof(*pbuf));
crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)pbuf, 254);
if (crc != le16_to_cpu(pbuf->crc)) {
pr_err("ONFI parameter recovery failed, aborting\n");
goto free_onfi_param_page;
}
p = pbuf;
}
if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
chip->manufacturer.desc->ops->fixup_onfi_param_page)
chip->manufacturer.desc->ops->fixup_onfi_param_page(chip, p);
/* Check version */
val = le16_to_cpu(p->revision);
if (val & ONFI_VERSION_2_3)
onfi_version = 23;
else if (val & ONFI_VERSION_2_2)
onfi_version = 22;
else if (val & ONFI_VERSION_2_1)
onfi_version = 21;
else if (val & ONFI_VERSION_2_0)
onfi_version = 20;
else if (val & ONFI_VERSION_1_0)
onfi_version = 10;
if (!onfi_version) {
pr_info("unsupported ONFI version: %d\n", val);
goto free_onfi_param_page;
}
sanitize_string(p->manufacturer, sizeof(p->manufacturer));
sanitize_string(p->model, sizeof(p->model));
chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
if (!chip->parameters.model) {
ret = -ENOMEM;
goto free_onfi_param_page;
}
memorg->pagesize = le32_to_cpu(p->byte_per_page);
mtd->writesize = memorg->pagesize;
/*
* pages_per_block and blocks_per_lun may not be a power-of-2 size
* (don't ask me who thought of this...). MTD assumes that these
* dimensions will be power-of-2, so just truncate the remaining area.
*/
memorg->pages_per_eraseblock =
1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
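/* e.g. a reported 96 pages per block is truncated down to 64 here. */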
mtd->erasesize = memorg->pages_per_eraseblock * memorg->pagesize;
memorg->oobsize = le16_to_cpu(p->spare_bytes_per_page);
mtd->oobsize = memorg->oobsize;
memorg->luns_per_target = p->lun_count;
memorg->planes_per_lun = 1 << p->interleaved_bits;
/* See erasesize comment */
memorg->eraseblocks_per_lun =
1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
memorg->max_bad_eraseblocks_per_lun = le32_to_cpu(p->blocks_per_lun);
memorg->bits_per_cell = p->bits_per_cell;
if (le16_to_cpu(p->features) & ONFI_FEATURE_16_BIT_BUS)
chip->options |= NAND_BUSWIDTH_16;
if (p->ecc_bits != 0xff) {
struct nand_ecc_props requirements = {
.strength = p->ecc_bits,
.step_size = 512,
};
nanddev_set_ecc_requirements(base, &requirements);
} else if (onfi_version >= 21 &&
(le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
/*
* The nand_flash_detect_ext_param_page() uses the
* Change Read Column command which may not be supported
* by the chip->legacy.cmdfunc. So try to update the
* chip->legacy.cmdfunc now. We do not replace user supplied
* command function.
*/
nand_legacy_adjust_cmdfunc(chip);
/* The Extended Parameter Page is supported since ONFI 2.1. */
if (nand_flash_detect_ext_param_page(chip, p))
pr_warn("Failed to detect ONFI extended param page\n");
} else {
pr_warn("Could not retrieve ONFI ECC requirements\n");
}
/* Save some parameters from the parameter page for future use */
if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_SET_GET_FEATURES) {
chip->parameters.supports_set_get_features = true;
bitmap_set(chip->parameters.get_feature_list,
ONFI_FEATURE_ADDR_TIMING_MODE, 1);
bitmap_set(chip->parameters.set_feature_list,
ONFI_FEATURE_ADDR_TIMING_MODE, 1);
}
onfi = kzalloc(sizeof(*onfi), GFP_KERNEL);
if (!onfi) {
ret = -ENOMEM;
goto free_model;
}
onfi->version = onfi_version;
onfi->tPROG = le16_to_cpu(p->t_prog);
onfi->tBERS = le16_to_cpu(p->t_bers);
onfi->tR = le16_to_cpu(p->t_r);
onfi->tCCS = le16_to_cpu(p->t_ccs);
onfi->fast_tCAD = le16_to_cpu(p->nvddr_nvddr2_features) & BIT(0);
onfi->sdr_timing_modes = le16_to_cpu(p->sdr_timing_modes);
if (le16_to_cpu(p->features) & ONFI_FEATURE_NV_DDR)
onfi->nvddr_timing_modes = le16_to_cpu(p->nvddr_timing_modes);
onfi->vendor_revision = le16_to_cpu(p->vendor_revision);
memcpy(onfi->vendor, p->vendor, sizeof(p->vendor));
chip->parameters.onfi = onfi;
/* Identification done, free the full ONFI parameter page and exit */
kfree(pbuf);
return 1;
free_model:
kfree(chip->parameters.model);
free_onfi_param_page:
kfree(pbuf);
return ret;
}
| linux-master | drivers/mtd/nand/raw/nand_onfi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright © 2012 John Crispin <[email protected]>
* Copyright © 2016 Hauke Mehrtens <[email protected]>
*/
#include <linux/mtd/rawnand.h>
#include <linux/of_gpio.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <lantiq_soc.h>
/* nand registers */
#define EBU_ADDSEL1 0x24
#define EBU_NAND_CON 0xB0
#define EBU_NAND_WAIT 0xB4
#define NAND_WAIT_RD BIT(0) /* NAND flash status output */
#define NAND_WAIT_WR_C BIT(3) /* NAND Write/Read complete */
#define EBU_NAND_ECC0 0xB8
#define EBU_NAND_ECC_AC 0xBC
/*
* nand commands
* The pins of the NAND chip are selected based on the address bits of the
* "register" read and write. There are no special registers, but an
* address range and the lower address bits are used to activate the
* correct line. For example when the bit (1 << 2) is set in the address
* the ALE pin will be activated.
*/
#define NAND_CMD_ALE BIT(2) /* address latch enable */
#define NAND_CMD_CLE BIT(3) /* command latch enable */
#define NAND_CMD_CS BIT(4) /* chip select */
#define NAND_CMD_SE BIT(5) /* spare area access latch */
#define NAND_CMD_WP BIT(6) /* write protect */
#define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE)
#define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE)
#define NAND_WRITE_DATA (NAND_CMD_CS)
#define NAND_READ_DATA (NAND_CMD_CS)
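/*
 * For example, writing a command byte to nandaddr + NAND_WRITE_CMD (base +
 * 0x18) drives address bits 4 (CS) and 3 (CLE), so the chip latches the byte
 * as a command.
 */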
/* we need to tell the EBU which address we mapped the NAND to */
#define ADDSEL1_MASK(x) (x << 4)
#define ADDSEL1_REGEN 1
/* we need to tell the EBU that we have NAND attached and set it up properly */
#define BUSCON1_SETUP (1 << 22)
#define BUSCON1_BCGEN_RES (0x3 << 12)
#define BUSCON1_WAITWRC2 (2 << 8)
#define BUSCON1_WAITRDC2 (2 << 6)
#define BUSCON1_HOLDC1 (1 << 4)
#define BUSCON1_RECOVC1 (1 << 2)
#define BUSCON1_CMULT4 1
#define NAND_CON_CE (1 << 20)
#define NAND_CON_OUT_CS1 (1 << 10)
#define NAND_CON_IN_CS1 (1 << 8)
#define NAND_CON_PRE_P (1 << 7)
#define NAND_CON_WP_P (1 << 6)
#define NAND_CON_SE_P (1 << 5)
#define NAND_CON_CS_P (1 << 4)
#define NAND_CON_CSMUX (1 << 1)
#define NAND_CON_NANDM 1
struct xway_nand_data {
struct nand_controller controller;
struct nand_chip chip;
unsigned long csflags;
void __iomem *nandaddr;
};
static u8 xway_readb(struct mtd_info *mtd, int op)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct xway_nand_data *data = nand_get_controller_data(chip);
return readb(data->nandaddr + op);
}
static void xway_writeb(struct mtd_info *mtd, int op, u8 value)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct xway_nand_data *data = nand_get_controller_data(chip);
writeb(value, data->nandaddr + op);
}
static void xway_select_chip(struct nand_chip *chip, int select)
{
struct xway_nand_data *data = nand_get_controller_data(chip);
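/*
 * select == 0 takes the EBU lock, then enables the controller and chip
 * enable; select == -1 undoes both and releases the lock taken on selection.
 */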
switch (select) {
case -1:
ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
spin_unlock_irqrestore(&ebu_lock, data->csflags);
break;
case 0:
spin_lock_irqsave(&ebu_lock, data->csflags);
ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
break;
default:
BUG();
}
}
static void xway_cmd_ctrl(struct nand_chip *chip, int cmd, unsigned int ctrl)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (cmd == NAND_CMD_NONE)
return;
if (ctrl & NAND_CLE)
xway_writeb(mtd, NAND_WRITE_CMD, cmd);
else if (ctrl & NAND_ALE)
xway_writeb(mtd, NAND_WRITE_ADDR, cmd);
while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
;
}
static int xway_dev_ready(struct nand_chip *chip)
{
return ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD;
}
static unsigned char xway_read_byte(struct nand_chip *chip)
{
return xway_readb(nand_to_mtd(chip), NAND_READ_DATA);
}
static void xway_read_buf(struct nand_chip *chip, u_char *buf, int len)
{
int i;
for (i = 0; i < len; i++)
buf[i] = xway_readb(nand_to_mtd(chip), NAND_WRITE_DATA);
}
static void xway_write_buf(struct nand_chip *chip, const u_char *buf, int len)
{
int i;
for (i = 0; i < len; i++)
xway_writeb(nand_to_mtd(chip), NAND_WRITE_DATA, buf[i]);
}
static int xway_attach_chip(struct nand_chip *chip)
{
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
static const struct nand_controller_ops xway_nand_ops = {
.attach_chip = xway_attach_chip,
};
/*
* Probe for the NAND device.
*/
static int xway_nand_probe(struct platform_device *pdev)
{
struct xway_nand_data *data;
struct mtd_info *mtd;
int err;
u32 cs;
u32 cs_flag = 0;
/* Allocate memory for the device structure (and zero it) */
data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
data->nandaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->nandaddr))
return PTR_ERR(data->nandaddr);
nand_set_flash_node(&data->chip, pdev->dev.of_node);
mtd = nand_to_mtd(&data->chip);
mtd->dev.parent = &pdev->dev;
data->chip.legacy.cmd_ctrl = xway_cmd_ctrl;
data->chip.legacy.dev_ready = xway_dev_ready;
data->chip.legacy.select_chip = xway_select_chip;
data->chip.legacy.write_buf = xway_write_buf;
data->chip.legacy.read_buf = xway_read_buf;
data->chip.legacy.read_byte = xway_read_byte;
data->chip.legacy.chip_delay = 30;
nand_controller_init(&data->controller);
data->controller.ops = &xway_nand_ops;
data->chip.controller = &data->controller;
platform_set_drvdata(pdev, data);
nand_set_controller_data(&data->chip, data);
/* load our CS from the DT. Either we find a valid 1 or default to 0 */
err = of_property_read_u32(pdev->dev.of_node, "lantiq,cs", &cs);
if (!err && cs == 1)
cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
/* setup the EBU to run in NAND mode on our base addr */
ltq_ebu_w32(CPHYSADDR(data->nandaddr)
| ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
| BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
| BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
| NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
| cs_flag, EBU_NAND_CON);
/*
* This driver assumes that the default ECC engine should be TYPE_SOFT.
* Set ->engine_type before registering the NAND devices in order to
* provide a driver specific default value.
*/
data->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
/* Scan to find existence of the device */
err = nand_scan(&data->chip, 1);
if (err)
return err;
err = mtd_device_register(mtd, NULL, 0);
if (err)
nand_cleanup(&data->chip);
return err;
}
/*
* Remove a NAND device.
*/
static void xway_nand_remove(struct platform_device *pdev)
{
struct xway_nand_data *data = platform_get_drvdata(pdev);
struct nand_chip *chip = &data->chip;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
}
static const struct of_device_id xway_nand_match[] = {
{ .compatible = "lantiq,nand-xway" },
{},
};
static struct platform_driver xway_nand_driver = {
.probe = xway_nand_probe,
.remove_new = xway_nand_remove,
.driver = {
.name = "lantiq,nand-xway",
.of_match_table = xway_nand_match,
},
};
builtin_platform_driver(xway_nand_driver);
| linux-master | drivers/mtd/nand/raw/xway_nand.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Overview:
* Bad block table support for the NAND driver
*
* Copyright © 2004 Thomas Gleixner ([email protected])
*
* Description:
*
* When nand_scan_bbt is called, then it tries to find the bad block table
* depending on the options in the BBT descriptor(s). If no flash based BBT
* (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory
* marked good / bad blocks. This information is used to create a memory BBT.
* Once a new bad block is discovered then the "factory" information is updated
* on the device.
* If a flash based BBT is specified then the function first tries to find the
* BBT on flash. If a BBT is found then the contents are read and the memory
* based BBT is created. If a mirrored BBT is selected then the mirror is
* searched too and the versions are compared. If the mirror has a greater
* version number, then the mirror BBT is used to build the memory based BBT.
* If the tables are not versioned, then we "or" the bad block information.
* If one of the BBTs is out of date or does not exist it is (re)created.
* If no BBT exists at all then the device is scanned for factory marked
* good / bad blocks and the bad block tables are created.
*
* For manufacturer created BBTs like the one found on M-SYS DOC devices
* the BBT is searched and read but never created
*
* The auto generated bad block table is located in the last good blocks
* of the device. The table is mirrored, so it can be updated eventually.
* The table is marked in the OOB area with an ident pattern and a version
* number which indicates which of both tables is more up to date. If the NAND
* controller needs the complete OOB area for the ECC information then the
* option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of
* course): it moves the ident pattern and the version byte into the data area
* and the OOB area will remain untouched.
*
* The table uses 2 bits per block
* 11b: block is good
* 00b: block is factory marked bad
* 01b, 10b: block is marked bad due to wear
*
* The memory bad block table uses the following scheme:
* 00b: block is good
* 01b: block is marked bad due to wear
* 10b: block is reserved (to protect the bbt area)
* 11b: block is factory marked bad
*
* Multichip devices like DOC store the bad block info per floor.
*
* Following assumptions are made:
* - bbts start at a page boundary, if autolocated on a block boundary
* - the space necessary for a bbt in FLASH does not exceed a block boundary
*/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/bbm.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/string.h>
#include "internals.h"
#define BBT_BLOCK_GOOD 0x00
#define BBT_BLOCK_WORN 0x01
#define BBT_BLOCK_RESERVED 0x02
#define BBT_BLOCK_FACTORY_BAD 0x03
#define BBT_ENTRY_MASK 0x03
#define BBT_ENTRY_SHIFT 2
static inline uint8_t bbt_get_entry(struct nand_chip *chip, int block)
{
uint8_t entry = chip->bbt[block >> BBT_ENTRY_SHIFT];
entry >>= (block & BBT_ENTRY_MASK) * 2;
return entry & BBT_ENTRY_MASK;
}
static inline void bbt_mark_entry(struct nand_chip *chip, int block,
uint8_t mark)
{
uint8_t msk = (mark & BBT_ENTRY_MASK) << ((block & BBT_ENTRY_MASK) * 2);
chip->bbt[block >> BBT_ENTRY_SHIFT] |= msk;
}
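/*
 * Each byte of chip->bbt packs the two-bit state of four blocks; block 5, for
 * example, is kept in bits [3:2] of chip->bbt[1].
 */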
static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
{
if (memcmp(buf, td->pattern, td->len))
return -1;
return 0;
}
/**
* check_pattern - [GENERIC] check if a pattern is in the buffer
* @buf: the buffer to search
* @len: the length of buffer to search
* @paglen: the pagelength
* @td: search pattern descriptor
*
* Check for a pattern at the given place. Used to search bad block tables and
* good / bad block identifiers.
*/
static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
{
if (td->options & NAND_BBT_NO_OOB)
return check_pattern_no_oob(buf, td);
/* Compare the pattern */
if (memcmp(buf + paglen + td->offs, td->pattern, td->len))
return -1;
return 0;
}
/**
* check_short_pattern - [GENERIC] check if a pattern is in the buffer
* @buf: the buffer to search
* @td: search pattern descriptor
*
* Check for a pattern at the given place. Used to search bad block tables and
* good / bad block identifiers. Same as check_pattern, but no optional empty
* check.
*/
static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
{
/* Compare the pattern */
if (memcmp(buf + td->offs, td->pattern, td->len))
return -1;
return 0;
}
/**
* add_marker_len - compute the length of the marker in data area
* @td: BBT descriptor used for computation
*
* The length will be 0 if the marker is located in OOB area.
*/
static u32 add_marker_len(struct nand_bbt_descr *td)
{
u32 len;
if (!(td->options & NAND_BBT_NO_OOB))
return 0;
len = td->len;
if (td->options & NAND_BBT_VERSION)
len++;
return len;
}
/**
* read_bbt - [GENERIC] Read the bad block table starting from page
* @this: NAND chip object
* @buf: temporary buffer
* @page: the starting page
* @num: the number of bbt descriptors to read
* @td: the bbt description table
* @offs: block number offset in the table
*
* Read the bad block table starting from page.
*/
static int read_bbt(struct nand_chip *this, uint8_t *buf, int page, int num,
struct nand_bbt_descr *td, int offs)
{
struct mtd_info *mtd = nand_to_mtd(this);
int res, ret = 0, i, j, act = 0;
size_t retlen, len, totlen;
loff_t from;
int bits = td->options & NAND_BBT_NRBITS_MSK;
uint8_t msk = (uint8_t)((1 << bits) - 1);
u32 marker_len;
int reserved_block_code = td->reserved_block_code;
totlen = (num * bits) >> 3;
marker_len = add_marker_len(td);
from = ((loff_t)page) << this->page_shift;
while (totlen) {
len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
if (marker_len) {
/*
* In case the BBT marker is not in the OOB area it
* will be just in the first page.
*/
len -= marker_len;
from += marker_len;
marker_len = 0;
}
res = mtd_read(mtd, from, len, &retlen, buf);
if (res < 0) {
if (mtd_is_eccerr(res)) {
pr_info("nand_bbt: ECC error in BBT at 0x%012llx\n",
from & ~mtd->writesize);
return res;
} else if (mtd_is_bitflip(res)) {
pr_info("nand_bbt: corrected error in BBT at 0x%012llx\n",
from & ~mtd->writesize);
ret = res;
} else {
pr_info("nand_bbt: error reading BBT\n");
return res;
}
}
/* Analyse data */
for (i = 0; i < len; i++) {
uint8_t dat = buf[i];
for (j = 0; j < 8; j += bits, act++) {
uint8_t tmp = (dat >> j) & msk;
if (tmp == msk)
continue;
if (reserved_block_code && (tmp == reserved_block_code)) {
pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
(loff_t)(offs + act) <<
this->bbt_erase_shift);
bbt_mark_entry(this, offs + act,
BBT_BLOCK_RESERVED);
mtd->ecc_stats.bbtblocks++;
continue;
}
/*
* Leave it for now; once this has matured we can
* move this message to pr_debug.
*/
pr_info("nand_read_bbt: bad block at 0x%012llx\n",
(loff_t)(offs + act) <<
this->bbt_erase_shift);
/* Factory marked bad or worn out? */
if (tmp == 0)
bbt_mark_entry(this, offs + act,
BBT_BLOCK_FACTORY_BAD);
else
bbt_mark_entry(this, offs + act,
BBT_BLOCK_WORN);
mtd->ecc_stats.badblocks++;
}
}
totlen -= len;
from += len;
}
return ret;
}
/**
* read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
* @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @chip: read the table for a specific chip, -1 read all chips; applies only if
* NAND_BBT_PERCHIP option is set
*
* Read the bad block table for all chips starting at a given page. We assume
* that the bbt bits are in consecutive order.
*/
static int read_abs_bbt(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td, int chip)
{
struct mtd_info *mtd = nand_to_mtd(this);
u64 targetsize = nanddev_target_size(&this->base);
int res = 0, i;
if (td->options & NAND_BBT_PERCHIP) {
int offs = 0;
for (i = 0; i < nanddev_ntargets(&this->base); i++) {
if (chip == -1 || chip == i)
res = read_bbt(this, buf, td->pages[i],
targetsize >> this->bbt_erase_shift,
td, offs);
if (res)
return res;
offs += targetsize >> this->bbt_erase_shift;
}
} else {
res = read_bbt(this, buf, td->pages[0],
mtd->size >> this->bbt_erase_shift, td, 0);
if (res)
return res;
}
return 0;
}
/* BBT marker is in the first page, no OOB */
static int scan_read_data(struct nand_chip *this, uint8_t *buf, loff_t offs,
struct nand_bbt_descr *td)
{
struct mtd_info *mtd = nand_to_mtd(this);
size_t retlen;
size_t len;
len = td->len;
if (td->options & NAND_BBT_VERSION)
len++;
return mtd_read(mtd, offs, len, &retlen, buf);
}
/**
* scan_read_oob - [GENERIC] Scan data+OOB region to buffer
* @this: NAND chip object
* @buf: temporary buffer
* @offs: offset at which to scan
* @len: length of data region to read
*
* Scan read data from data+OOB. May traverse multiple pages, interleaving
* page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest"
* ECC condition (error or bitflip). May quit on the first (non-ECC) error.
*/
static int scan_read_oob(struct nand_chip *this, uint8_t *buf, loff_t offs,
size_t len)
{
struct mtd_info *mtd = nand_to_mtd(this);
struct mtd_oob_ops ops = { };
int res, ret = 0;
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
while (len > 0) {
ops.datbuf = buf;
ops.len = min(len, (size_t)mtd->writesize);
ops.oobbuf = buf + ops.len;
res = mtd_read_oob(mtd, offs, &ops);
if (res) {
if (!mtd_is_bitflip_or_eccerr(res))
return res;
else if (mtd_is_eccerr(res) || !ret)
ret = res;
}
buf += mtd->oobsize + mtd->writesize;
len -= mtd->writesize;
offs += mtd->writesize;
}
return ret;
}
static int scan_read(struct nand_chip *this, uint8_t *buf, loff_t offs,
size_t len, struct nand_bbt_descr *td)
{
if (td->options & NAND_BBT_NO_OOB)
return scan_read_data(this, buf, offs, td);
else
return scan_read_oob(this, buf, offs, len);
}
/* Scan write data with oob to flash */
static int scan_write_bbt(struct nand_chip *this, loff_t offs, size_t len,
uint8_t *buf, uint8_t *oob)
{
struct mtd_info *mtd = nand_to_mtd(this);
struct mtd_oob_ops ops = { };
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
ops.datbuf = buf;
ops.oobbuf = oob;
ops.len = len;
return mtd_write_oob(mtd, offs, &ops);
}
static u32 bbt_get_ver_offs(struct nand_chip *this, struct nand_bbt_descr *td)
{
struct mtd_info *mtd = nand_to_mtd(this);
u32 ver_offs = td->veroffs;
if (!(td->options & NAND_BBT_NO_OOB))
ver_offs += mtd->writesize;
return ver_offs;
}
/**
* read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
* @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @md: descriptor for the bad block table mirror
*
* Read the bad block table(s) for all chips starting at a given page. We
* assume that the bbt bits are in consecutive order.
*/
static void read_abs_bbts(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td, struct nand_bbt_descr *md)
{
struct mtd_info *mtd = nand_to_mtd(this);
/* Read the primary version, if available */
if (td->options & NAND_BBT_VERSION) {
scan_read(this, buf, (loff_t)td->pages[0] << this->page_shift,
mtd->writesize, td);
td->version[0] = buf[bbt_get_ver_offs(this, td)];
pr_info("Bad block table at page %d, version 0x%02X\n",
td->pages[0], td->version[0]);
}
/* Read the mirror version, if available */
if (md && (md->options & NAND_BBT_VERSION)) {
scan_read(this, buf, (loff_t)md->pages[0] << this->page_shift,
mtd->writesize, md);
md->version[0] = buf[bbt_get_ver_offs(this, md)];
pr_info("Bad block table at page %d, version 0x%02X\n",
md->pages[0], md->version[0]);
}
}
/* Scan a given block partially */
static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
loff_t offs, uint8_t *buf)
{
struct mtd_info *mtd = nand_to_mtd(this);
struct mtd_oob_ops ops = { };
int ret, page_offset;
ops.ooblen = mtd->oobsize;
ops.oobbuf = buf;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.mode = MTD_OPS_PLACE_OOB;
page_offset = nand_bbm_get_next_page(this, 0);
while (page_offset >= 0) {
/*
* Read the full oob until read_oob is fixed to handle single
* byte reads for 16 bit buswidth.
*/
ret = mtd_read_oob(mtd, offs + (page_offset * mtd->writesize),
&ops);
/* Ignore ECC errors when checking for BBM */
if (ret && !mtd_is_bitflip_or_eccerr(ret))
return ret;
if (check_short_pattern(buf, bd))
return 1;
page_offset = nand_bbm_get_next_page(this, page_offset + 1);
}
return 0;
}
/* Check if a potential BBT block is marked as bad */
static int bbt_block_checkbad(struct nand_chip *this, struct nand_bbt_descr *td,
loff_t offs, uint8_t *buf)
{
struct nand_bbt_descr *bd = this->badblock_pattern;
/*
* No need to check for a bad BBT block if the BBM area overlaps with
* the bad block table marker area in OOB since writing a BBM here
* invalidates the bad block table marker anyway.
*/
if (!(td->options & NAND_BBT_NO_OOB) &&
td->offs >= bd->offs && td->offs < bd->offs + bd->len)
return 0;
/*
* There is no point in checking for a bad block marker if writing
* such marker is not supported
*/
if (this->bbt_options & NAND_BBT_NO_OOB_BBM ||
this->options & NAND_NO_BBM_QUIRK)
return 0;
if (scan_block_fast(this, bd, offs, buf) > 0)
return 1;
return 0;
}
/**
* create_bbt - [GENERIC] Create a bad block table by scanning the device
* @this: NAND chip object
* @buf: temporary buffer
* @bd: descriptor for the good/bad block search pattern
* @chip: create the table for a specific chip, -1 read all chips; applies only
* if NAND_BBT_PERCHIP option is set
*
* Create a bad block table by scanning the device for the given good/bad block
* identify pattern.
*/
static int create_bbt(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *bd, int chip)
{
u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
int i, numblocks, startblock;
loff_t from;
pr_info("Scanning device for bad blocks\n");
if (chip == -1) {
numblocks = mtd->size >> this->bbt_erase_shift;
startblock = 0;
from = 0;
} else {
if (chip >= nanddev_ntargets(&this->base)) {
pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
chip + 1, nanddev_ntargets(&this->base));
return -EINVAL;
}
numblocks = targetsize >> this->bbt_erase_shift;
startblock = chip * numblocks;
numblocks += startblock;
from = (loff_t)startblock << this->bbt_erase_shift;
}
for (i = startblock; i < numblocks; i++) {
int ret;
BUG_ON(bd->options & NAND_BBT_NO_OOB);
ret = scan_block_fast(this, bd, from, buf);
if (ret < 0)
return ret;
if (ret) {
bbt_mark_entry(this, i, BBT_BLOCK_FACTORY_BAD);
pr_warn("Bad eraseblock %d at 0x%012llx\n",
i, (unsigned long long)from);
mtd->ecc_stats.badblocks++;
}
from += (1 << this->bbt_erase_shift);
}
return 0;
}
/**
* search_bbt - [GENERIC] scan the device for a specific bad block table
* @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
*
* Read the bad block table by searching for a given ident pattern. Search is
* performed either from the beginning up or from the end of the device
* downwards. The search starts always at the start of a block. If the option
* NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
* the bad block information of this chip. This is necessary to provide support
* for certain DOC devices.
*
* The bbt ident pattern resides in the oob area of the first page in a block.
*/
static int search_bbt(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td)
{
u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
int i, chips;
int startblock, block, dir;
int scanlen = mtd->writesize + mtd->oobsize;
int bbtblocks;
int blocktopage = this->bbt_erase_shift - this->page_shift;
/* Search direction top -> down? */
if (td->options & NAND_BBT_LASTBLOCK) {
startblock = (mtd->size >> this->bbt_erase_shift) - 1;
dir = -1;
} else {
startblock = 0;
dir = 1;
}
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
chips = nanddev_ntargets(&this->base);
bbtblocks = targetsize >> this->bbt_erase_shift;
startblock &= bbtblocks - 1;
} else {
chips = 1;
bbtblocks = mtd->size >> this->bbt_erase_shift;
}
for (i = 0; i < chips; i++) {
/* Reset version information */
td->version[i] = 0;
td->pages[i] = -1;
/* Scan the maximum number of blocks */
for (block = 0; block < td->maxblocks; block++) {
int actblock = startblock + dir * block;
loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
/* Check if block is marked bad */
if (bbt_block_checkbad(this, td, offs, buf))
continue;
/* Read first page */
scan_read(this, buf, offs, mtd->writesize, td);
if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
td->pages[i] = actblock << blocktopage;
if (td->options & NAND_BBT_VERSION) {
offs = bbt_get_ver_offs(this, td);
td->version[i] = buf[offs];
}
break;
}
}
startblock += targetsize >> this->bbt_erase_shift;
}
/* Check if we found a bbt for each requested chip */
for (i = 0; i < chips; i++) {
if (td->pages[i] == -1)
pr_warn("Bad block table not found for chip %d\n", i);
else
pr_info("Bad block table found at page %d, version 0x%02X\n",
td->pages[i], td->version[i]);
}
return 0;
}
/**
* search_read_bbts - [GENERIC] scan the device for bad block table(s)
* @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @md: descriptor for the bad block table mirror
*
* Search and read the bad block table(s).
*/
static void search_read_bbts(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td,
struct nand_bbt_descr *md)
{
/* Search the primary table */
search_bbt(this, buf, td);
/* Search the mirror table */
if (md)
search_bbt(this, buf, md);
}
/**
* get_bbt_block - Get the first valid eraseblock suitable to store a BBT
* @this: the NAND device
* @td: the BBT description
* @md: the mirror BBT descriptor
* @chip: the CHIP selector
*
* This function returns a positive block number pointing to a valid eraseblock
* suitable to store a BBT (i.e. in the range reserved for BBT), or -ENOSPC if
* all blocks are already used or marked bad. If td->pages[chip] was already
* pointing to a valid block we re-use it, otherwise we search for the next
* valid one.
*/
static int get_bbt_block(struct nand_chip *this, struct nand_bbt_descr *td,
struct nand_bbt_descr *md, int chip)
{
u64 targetsize = nanddev_target_size(&this->base);
int startblock, dir, page, numblocks, i;
/*
* There was already a version of the table, reuse the page. This
* applies for absolute placement too, as we have the page number in
* td->pages.
*/
if (td->pages[chip] != -1)
return td->pages[chip] >>
(this->bbt_erase_shift - this->page_shift);
numblocks = (int)(targetsize >> this->bbt_erase_shift);
if (!(td->options & NAND_BBT_PERCHIP))
numblocks *= nanddev_ntargets(&this->base);
/*
* Automatic placement of the bad block table. Search direction
* top -> down?
*/
if (td->options & NAND_BBT_LASTBLOCK) {
startblock = numblocks * (chip + 1) - 1;
dir = -1;
} else {
startblock = chip * numblocks;
dir = 1;
}
for (i = 0; i < td->maxblocks; i++) {
int block = startblock + dir * i;
/* Check if the block is bad */
switch (bbt_get_entry(this, block)) {
case BBT_BLOCK_WORN:
case BBT_BLOCK_FACTORY_BAD:
continue;
}
page = block << (this->bbt_erase_shift - this->page_shift);
/* Check if the block is used by the mirror table */
if (!md || md->pages[chip] != page)
return block;
}
return -ENOSPC;
}
/**
* mark_bbt_block_bad - Mark one of the block reserved for BBT bad
* @this: the NAND device
* @td: the BBT description
* @chip: the CHIP selector
* @block: the BBT block to mark
*
* Blocks reserved for BBT can become bad. This function is a helper to mark
* such blocks as bad. It takes care of updating the in-memory BBT, marking the
* block as bad using a bad block marker and invalidating the associated
* td->pages[] entry.
*/
static void mark_bbt_block_bad(struct nand_chip *this,
struct nand_bbt_descr *td,
int chip, int block)
{
loff_t to;
int res;
bbt_mark_entry(this, block, BBT_BLOCK_WORN);
to = (loff_t)block << this->bbt_erase_shift;
res = nand_markbad_bbm(this, to);
if (res)
pr_warn("nand_bbt: error %d while marking block %d bad\n",
res, block);
td->pages[chip] = -1;
}
/**
* write_bbt - [GENERIC] (Re)write the bad block table
* @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @md: descriptor for the bad block table mirror
* @chipsel: selector for a specific chip, -1 for all
*
* (Re)write the bad block table.
*/
static int write_bbt(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td, struct nand_bbt_descr *md,
int chipsel)
{
u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
struct erase_info einfo;
int i, res, chip = 0;
int bits, page, offs, numblocks, sft, sftmsk;
int nrchips, pageoffs, ooboffs;
uint8_t msk[4];
uint8_t rcode = td->reserved_block_code;
size_t retlen, len = 0;
loff_t to;
struct mtd_oob_ops ops = { };
ops.ooblen = mtd->oobsize;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.mode = MTD_OPS_PLACE_OOB;
if (!rcode)
rcode = 0xff;
/* Write bad block table per chip rather than per device? */
if (td->options & NAND_BBT_PERCHIP) {
numblocks = (int)(targetsize >> this->bbt_erase_shift);
/* Full device write or specific chip? */
if (chipsel == -1) {
nrchips = nanddev_ntargets(&this->base);
} else {
nrchips = chipsel + 1;
chip = chipsel;
}
} else {
numblocks = (int)(mtd->size >> this->bbt_erase_shift);
nrchips = 1;
}
/* Loop through the chips */
while (chip < nrchips) {
int block;
block = get_bbt_block(this, td, md, chip);
if (block < 0) {
pr_err("No space left to write bad block table\n");
res = block;
goto outerr;
}
/*
* get_bbt_block() returns a block number, shift the value to
* get a page number.
*/
page = block << (this->bbt_erase_shift - this->page_shift);
/* Set up shift count and masks for the flash table */
bits = td->options & NAND_BBT_NRBITS_MSK;
msk[2] = ~rcode;
switch (bits) {
case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01;
msk[3] = 0x01;
break;
case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01;
msk[3] = 0x03;
break;
case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C;
msk[3] = 0x0f;
break;
case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F;
msk[3] = 0xff;
break;
default: return -EINVAL;
}
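/*
 * msk[] maps the in-memory block state (0 good, 1 worn, 2 reserved, 3 factory
 * bad) to the bits cleared in the corresponding flash table entry further
 * down.
 */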
to = ((loff_t)page) << this->page_shift;
/* Must we save the block contents? */
if (td->options & NAND_BBT_SAVECONTENT) {
/* Make it block aligned */
to &= ~(((loff_t)1 << this->bbt_erase_shift) - 1);
len = 1 << this->bbt_erase_shift;
res = mtd_read(mtd, to, len, &retlen, buf);
if (res < 0) {
if (retlen != len) {
pr_info("nand_bbt: error reading block for writing the bad block table\n");
return res;
}
pr_warn("nand_bbt: ECC error while reading block for writing bad block table\n");
}
/* Read oob data */
ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
ops.oobbuf = &buf[len];
res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
if (res < 0 || ops.oobretlen != ops.ooblen)
goto outerr;
/* Calc the byte offset in the buffer */
pageoffs = page - (int)(to >> this->page_shift);
offs = pageoffs << this->page_shift;
/* Preset the bbt area with 0xff */
memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
ooboffs = len + (pageoffs * mtd->oobsize);
} else if (td->options & NAND_BBT_NO_OOB) {
ooboffs = 0;
offs = td->len;
/* The version byte */
if (td->options & NAND_BBT_VERSION)
offs++;
/* Calc length */
len = (size_t)(numblocks >> sft);
len += offs;
/* Make it page aligned! */
len = ALIGN(len, mtd->writesize);
/* Preset the buffer with 0xff */
memset(buf, 0xff, len);
/* Pattern is located at the beginning of the first page */
memcpy(buf, td->pattern, td->len);
} else {
/* Calc length */
len = (size_t)(numblocks >> sft);
/* Make it page aligned! */
len = ALIGN(len, mtd->writesize);
/* Preset the buffer with 0xff */
memset(buf, 0xff, len +
(len >> this->page_shift)* mtd->oobsize);
offs = 0;
ooboffs = len;
/* Pattern is located in oob area of first page */
memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
}
if (td->options & NAND_BBT_VERSION)
buf[ooboffs + td->veroffs] = td->version[chip];
/* Walk through the memory table */
for (i = 0; i < numblocks; i++) {
uint8_t dat;
int sftcnt = (i << (3 - sft)) & sftmsk;
dat = bbt_get_entry(this, chip * numblocks + i);
/* Do not store the reserved bbt blocks! */
buf[offs + (i >> sft)] &= ~(msk[dat] << sftcnt);
}
memset(&einfo, 0, sizeof(einfo));
einfo.addr = to;
einfo.len = 1 << this->bbt_erase_shift;
res = nand_erase_nand(this, &einfo, 1);
if (res < 0) {
pr_warn("nand_bbt: error while erasing BBT block %d\n",
res);
mark_bbt_block_bad(this, td, chip, block);
continue;
}
res = scan_write_bbt(this, to, len, buf,
td->options & NAND_BBT_NO_OOB ?
NULL : &buf[len]);
if (res < 0) {
pr_warn("nand_bbt: error while writing BBT block %d\n",
res);
mark_bbt_block_bad(this, td, chip, block);
continue;
}
pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
(unsigned long long)to, td->version[chip]);
/* Mark it as used */
td->pages[chip++] = page;
}
return 0;
outerr:
pr_warn("nand_bbt: error while writing bad block table %d\n", res);
return res;
}
/**
* nand_memory_bbt - [GENERIC] create a memory based bad block table
* @this: NAND chip object
* @bd: descriptor for the good/bad block search pattern
*
* The function creates a memory based bbt by scanning the device for
* manufacturer / software marked good / bad blocks.
*/
static inline int nand_memory_bbt(struct nand_chip *this,
struct nand_bbt_descr *bd)
{
u8 *pagebuf = nand_get_data_buf(this);
return create_bbt(this, pagebuf, bd, -1);
}
/**
* check_create - [GENERIC] create and write bbt(s) if necessary
* @this: the NAND device
* @buf: temporary buffer
* @bd: descriptor for the good/bad block search pattern
*
* The function checks the results of the previous call to read_bbt and creates
* / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
* for the chip/device. Update is necessary if one of the tables is missing or
* the version nr. of one table is less than the other.
*/
static int check_create(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *bd)
{
int i, chips, writeops, create, chipsel, res, res2;
struct nand_bbt_descr *td = this->bbt_td;
struct nand_bbt_descr *md = this->bbt_md;
struct nand_bbt_descr *rd, *rd2;
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP)
chips = nanddev_ntargets(&this->base);
else
chips = 1;
for (i = 0; i < chips; i++) {
writeops = 0;
create = 0;
rd = NULL;
rd2 = NULL;
res = res2 = 0;
/* Per chip or per device? */
chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
/* Mirrored table available? */
if (md) {
if (td->pages[i] == -1 && md->pages[i] == -1) {
create = 1;
writeops = 0x03;
} else if (td->pages[i] == -1) {
rd = md;
writeops = 0x01;
} else if (md->pages[i] == -1) {
rd = td;
writeops = 0x02;
} else if (td->version[i] == md->version[i]) {
rd = td;
if (!(td->options & NAND_BBT_VERSION))
rd2 = md;
} else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
rd = td;
writeops = 0x02;
} else {
rd = md;
writeops = 0x01;
}
} else {
if (td->pages[i] == -1) {
create = 1;
writeops = 0x01;
} else {
rd = td;
}
}
if (create) {
/* Create the bad block table by scanning the device? */
if (!(td->options & NAND_BBT_CREATE))
continue;
/* Create the table in memory by scanning the chip(s) */
if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
create_bbt(this, buf, bd, chipsel);
td->version[i] = 1;
if (md)
md->version[i] = 1;
}
/* Read back first? */
if (rd) {
res = read_abs_bbt(this, buf, rd, chipsel);
if (mtd_is_eccerr(res)) {
/* Mark table as invalid */
rd->pages[i] = -1;
rd->version[i] = 0;
i--;
continue;
}
}
/* If they weren't versioned, read both */
if (rd2) {
res2 = read_abs_bbt(this, buf, rd2, chipsel);
if (mtd_is_eccerr(res2)) {
/* Mark table as invalid */
rd2->pages[i] = -1;
rd2->version[i] = 0;
i--;
continue;
}
}
/* Scrub the flash table(s)? */
if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
writeops = 0x03;
/* Update version numbers before writing */
if (md) {
td->version[i] = max(td->version[i], md->version[i]);
md->version[i] = td->version[i];
}
/* Write the bad block table to the device? */
if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
res = write_bbt(this, buf, td, md, chipsel);
if (res < 0)
return res;
}
/* Write the mirror bad block table to the device? */
if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
res = write_bbt(this, buf, md, td, chipsel);
if (res < 0)
return res;
}
}
return 0;
}
/**
* nand_update_bbt - update bad block table(s)
* @this: the NAND device
* @offs: the offset of the newly marked block
*
* The function updates the bad block table(s).
*/
static int nand_update_bbt(struct nand_chip *this, loff_t offs)
{
struct mtd_info *mtd = nand_to_mtd(this);
int len, res = 0;
int chip, chipsel;
uint8_t *buf;
struct nand_bbt_descr *td = this->bbt_td;
struct nand_bbt_descr *md = this->bbt_md;
if (!this->bbt || !td)
return -EINVAL;
/* Allocate a temporary buffer for one eraseblock incl. oob */
len = (1 << this->bbt_erase_shift);
len += (len >> this->page_shift) * mtd->oobsize;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
chip = (int)(offs >> this->chip_shift);
chipsel = chip;
} else {
chip = 0;
chipsel = -1;
}
td->version[chip]++;
if (md)
md->version[chip]++;
/* Write the bad block table to the device? */
if (td->options & NAND_BBT_WRITE) {
res = write_bbt(this, buf, td, md, chipsel);
if (res < 0)
goto out;
}
/* Write the mirror bad block table to the device? */
if (md && (md->options & NAND_BBT_WRITE)) {
res = write_bbt(this, buf, md, td, chipsel);
}
out:
kfree(buf);
return res;
}
/**
* mark_bbt_region - [GENERIC] mark the bad block table regions
* @this: the NAND device
* @td: bad block table descriptor
*
* The bad block table regions are marked as "bad" to prevent accidental
* erasures / writes. The regions are identified by the mark 0x02.
*/
static void mark_bbt_region(struct nand_chip *this, struct nand_bbt_descr *td)
{
u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
int i, j, chips, block, nrblocks, update;
uint8_t oldval;
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
chips = nanddev_ntargets(&this->base);
nrblocks = (int)(targetsize >> this->bbt_erase_shift);
} else {
chips = 1;
nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
}
for (i = 0; i < chips; i++) {
if ((td->options & NAND_BBT_ABSPAGE) ||
!(td->options & NAND_BBT_WRITE)) {
if (td->pages[i] == -1)
continue;
block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
oldval = bbt_get_entry(this, block);
bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
if ((oldval != BBT_BLOCK_RESERVED) &&
td->reserved_block_code)
nand_update_bbt(this, (loff_t)block <<
this->bbt_erase_shift);
continue;
}
update = 0;
if (td->options & NAND_BBT_LASTBLOCK)
block = ((i + 1) * nrblocks) - td->maxblocks;
else
block = i * nrblocks;
for (j = 0; j < td->maxblocks; j++) {
oldval = bbt_get_entry(this, block);
bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
if (oldval != BBT_BLOCK_RESERVED)
update = 1;
block++;
}
/*
* If we want reserved blocks to be recorded to flash, and some
* new ones have been marked, then we need to update the stored
* bbts. This should only happen once.
*/
if (update && td->reserved_block_code)
nand_update_bbt(this, (loff_t)(block - 1) <<
this->bbt_erase_shift);
}
}
/**
* verify_bbt_descr - verify the bad block description
* @this: the NAND device
* @bd: the table to verify
*
* This function performs a few sanity checks on the bad block description
* table.
*/
static void verify_bbt_descr(struct nand_chip *this, struct nand_bbt_descr *bd)
{
u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
u32 pattern_len;
u32 bits;
u32 table_size;
if (!bd)
return;
pattern_len = bd->len;
bits = bd->options & NAND_BBT_NRBITS_MSK;
BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
!(this->bbt_options & NAND_BBT_USE_FLASH));
BUG_ON(!bits);
if (bd->options & NAND_BBT_VERSION)
pattern_len++;
if (bd->options & NAND_BBT_NO_OOB) {
BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
BUG_ON(bd->offs);
if (bd->options & NAND_BBT_VERSION)
BUG_ON(bd->veroffs != bd->len);
BUG_ON(bd->options & NAND_BBT_SAVECONTENT);
}
if (bd->options & NAND_BBT_PERCHIP)
table_size = targetsize >> this->bbt_erase_shift;
else
table_size = mtd->size >> this->bbt_erase_shift;
table_size >>= 3;
table_size *= bits;
if (bd->options & NAND_BBT_NO_OOB)
table_size += pattern_len;
BUG_ON(table_size > (1 << this->bbt_erase_shift));
}
/**
* nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
* @this: the NAND device
* @bd: descriptor for the good/bad block search pattern
*
* The function checks whether bad block table(s) are already available. If
* not it scans the device for manufacturer marked good / bad blocks and writes
* the bad block table(s) to the selected place.
*
* The bad block table memory is allocated here. It must be freed by calling
* the nand_free_bbt function.
*/
static int nand_scan_bbt(struct nand_chip *this, struct nand_bbt_descr *bd)
{
struct mtd_info *mtd = nand_to_mtd(this);
int len, res;
uint8_t *buf;
struct nand_bbt_descr *td = this->bbt_td;
struct nand_bbt_descr *md = this->bbt_md;
len = (mtd->size >> (this->bbt_erase_shift + 2)) ? : 1;
/*
* Allocate memory (2bit per block) and clear the memory bad block
* table.
*/
this->bbt = kzalloc(len, GFP_KERNEL);
if (!this->bbt)
return -ENOMEM;
/*
* If no primary table descriptor is given, scan the device to build a
* memory based bad block table.
*/
if (!td) {
if ((res = nand_memory_bbt(this, bd))) {
pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
goto err_free_bbt;
}
return 0;
}
verify_bbt_descr(this, td);
verify_bbt_descr(this, md);
/* Allocate a temporary buffer for one eraseblock incl. oob */
len = (1 << this->bbt_erase_shift);
len += (len >> this->page_shift) * mtd->oobsize;
buf = vmalloc(len);
if (!buf) {
res = -ENOMEM;
goto err_free_bbt;
}
/* Is the bbt at a given page? */
if (td->options & NAND_BBT_ABSPAGE) {
read_abs_bbts(this, buf, td, md);
} else {
/* Search the bad block table using a pattern in oob */
search_read_bbts(this, buf, td, md);
}
res = check_create(this, buf, bd);
if (res)
goto err_free_buf;
/* Prevent the bbt regions from erasing / writing */
mark_bbt_region(this, td);
if (md)
mark_bbt_region(this, md);
vfree(buf);
return 0;
err_free_buf:
vfree(buf);
err_free_bbt:
kfree(this->bbt);
this->bbt = NULL;
return res;
}
/*
* Define some generic bad / good block scan pattern which are used
* while scanning a device for factory marked good / bad blocks.
*/
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
/* Generic flash bbt descriptors */
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
static struct nand_bbt_descr bbt_main_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
.offs = 8,
.len = 4,
.veroffs = 12,
.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
.pattern = bbt_pattern
};
static struct nand_bbt_descr bbt_mirror_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
.offs = 8,
.len = 4,
.veroffs = 12,
.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
.pattern = mirror_pattern
};
static struct nand_bbt_descr bbt_main_no_oob_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
| NAND_BBT_NO_OOB,
.len = 4,
.veroffs = 4,
.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
.pattern = bbt_pattern
};
static struct nand_bbt_descr bbt_mirror_no_oob_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
| NAND_BBT_NO_OOB,
.len = 4,
.veroffs = 4,
.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
.pattern = mirror_pattern
};
#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
/**
* nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
* @this: NAND chip to create descriptor for
*
* This function allocates and initializes a nand_bbt_descr for BBM detection
* based on the properties of @this. The new descriptor is stored in
* this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
* passed to this function.
*/
static int nand_create_badblock_pattern(struct nand_chip *this)
{
struct nand_bbt_descr *bd;
if (this->badblock_pattern) {
pr_warn("Bad block pattern already allocated; not replacing\n");
return -EINVAL;
}
bd = kzalloc(sizeof(*bd), GFP_KERNEL);
if (!bd)
return -ENOMEM;
bd->options = this->bbt_options & BADBLOCK_SCAN_MASK;
bd->offs = this->badblockpos;
bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
bd->pattern = scan_ff_pattern;
bd->options |= NAND_BBT_DYNAMICSTRUCT;
this->badblock_pattern = bd;
return 0;
}
/**
* nand_create_bbt - [NAND Interface] Select a default bad block table for the device
* @this: NAND chip object
*
* This function selects the default bad block table support for the device and
* calls the nand_scan_bbt function.
*/
int nand_create_bbt(struct nand_chip *this)
{
int ret;
/* Is a flash based bad block table requested? */
if (this->bbt_options & NAND_BBT_USE_FLASH) {
/* Use the default pattern descriptors */
if (!this->bbt_td) {
if (this->bbt_options & NAND_BBT_NO_OOB) {
this->bbt_td = &bbt_main_no_oob_descr;
this->bbt_md = &bbt_mirror_no_oob_descr;
} else {
this->bbt_td = &bbt_main_descr;
this->bbt_md = &bbt_mirror_descr;
}
}
} else {
this->bbt_td = NULL;
this->bbt_md = NULL;
}
if (!this->badblock_pattern) {
ret = nand_create_badblock_pattern(this);
if (ret)
return ret;
}
return nand_scan_bbt(this, this->badblock_pattern);
}
EXPORT_SYMBOL(nand_create_bbt);
/**
* nand_isreserved_bbt - [NAND Interface] Check if a block is reserved
* @this: NAND chip object
* @offs: offset in the device
*/
int nand_isreserved_bbt(struct nand_chip *this, loff_t offs)
{
int block;
block = (int)(offs >> this->bbt_erase_shift);
return bbt_get_entry(this, block) == BBT_BLOCK_RESERVED;
}
/**
* nand_isbad_bbt - [NAND Interface] Check if a block is bad
* @this: NAND chip object
* @offs: offset in the device
* @allowbbt: allow access to bad block table region
*/
int nand_isbad_bbt(struct nand_chip *this, loff_t offs, int allowbbt)
{
int block, res;
block = (int)(offs >> this->bbt_erase_shift);
res = bbt_get_entry(this, block);
pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
(unsigned int)offs, block, res);
if (mtd_check_expert_analysis_mode())
return 0;
switch (res) {
case BBT_BLOCK_GOOD:
return 0;
case BBT_BLOCK_WORN:
return 1;
case BBT_BLOCK_RESERVED:
return allowbbt ? 0 : 1;
}
return 1;
}
/**
* nand_markbad_bbt - [NAND Interface] Mark a block bad in the BBT
* @this: NAND chip object
* @offs: offset of the bad block
*/
int nand_markbad_bbt(struct nand_chip *this, loff_t offs)
{
int block, ret = 0;
block = (int)(offs >> this->bbt_erase_shift);
/* Mark bad block in memory */
bbt_mark_entry(this, block, BBT_BLOCK_WORN);
/* Update flash-based bad block table */
if (this->bbt_options & NAND_BBT_USE_FLASH)
ret = nand_update_bbt(this, offs);
return ret;
}
| linux-master | drivers/mtd/nand/raw/nand_bbt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NAND flash simulator.
*
* Author: Artem B. Bityuckiy <[email protected]>, <[email protected]>
*
* Copyright (C) 2004 Nokia Corporation
*
* Note: NS means "NAND Simulator".
* Note: Input means input TO flash chip, output means output FROM chip.
*/
#define pr_fmt(fmt) "[nandsim]" fmt
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
/* Default simulator parameters values */
#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
!defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \
!defined(CONFIG_NANDSIM_THIRD_ID_BYTE) || \
!defined(CONFIG_NANDSIM_FOURTH_ID_BYTE)
#define CONFIG_NANDSIM_FIRST_ID_BYTE 0x98
#define CONFIG_NANDSIM_SECOND_ID_BYTE 0x39
#define CONFIG_NANDSIM_THIRD_ID_BYTE 0xFF /* No byte */
#define CONFIG_NANDSIM_FOURTH_ID_BYTE 0xFF /* No byte */
#endif
#ifndef CONFIG_NANDSIM_ACCESS_DELAY
#define CONFIG_NANDSIM_ACCESS_DELAY 25
#endif
#ifndef CONFIG_NANDSIM_PROGRAMM_DELAY
#define CONFIG_NANDSIM_PROGRAMM_DELAY 200
#endif
#ifndef CONFIG_NANDSIM_ERASE_DELAY
#define CONFIG_NANDSIM_ERASE_DELAY 2
#endif
#ifndef CONFIG_NANDSIM_OUTPUT_CYCLE
#define CONFIG_NANDSIM_OUTPUT_CYCLE 40
#endif
#ifndef CONFIG_NANDSIM_INPUT_CYCLE
#define CONFIG_NANDSIM_INPUT_CYCLE 50
#endif
#ifndef CONFIG_NANDSIM_BUS_WIDTH
#define CONFIG_NANDSIM_BUS_WIDTH 8
#endif
#ifndef CONFIG_NANDSIM_DO_DELAYS
#define CONFIG_NANDSIM_DO_DELAYS 0
#endif
#ifndef CONFIG_NANDSIM_LOG
#define CONFIG_NANDSIM_LOG 0
#endif
#ifndef CONFIG_NANDSIM_DBG
#define CONFIG_NANDSIM_DBG 0
#endif
#ifndef CONFIG_NANDSIM_MAX_PARTS
#define CONFIG_NANDSIM_MAX_PARTS 32
#endif
static uint access_delay = CONFIG_NANDSIM_ACCESS_DELAY;
static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
static uint erase_delay = CONFIG_NANDSIM_ERASE_DELAY;
static uint output_cycle = CONFIG_NANDSIM_OUTPUT_CYCLE;
static uint input_cycle = CONFIG_NANDSIM_INPUT_CYCLE;
static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
static uint log = CONFIG_NANDSIM_LOG;
static uint dbg = CONFIG_NANDSIM_DBG;
static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
static unsigned int parts_num;
static char *badblocks = NULL;
static char *weakblocks = NULL;
static char *weakpages = NULL;
static unsigned int bitflips = 0;
static char *gravepages = NULL;
static unsigned int overridesize = 0;
static char *cache_file = NULL;
static unsigned int bbt;
static unsigned int bch;
static u_char id_bytes[8] = {
[0] = CONFIG_NANDSIM_FIRST_ID_BYTE,
[1] = CONFIG_NANDSIM_SECOND_ID_BYTE,
[2] = CONFIG_NANDSIM_THIRD_ID_BYTE,
[3] = CONFIG_NANDSIM_FOURTH_ID_BYTE,
[4 ... 7] = 0xFF,
};
module_param_array(id_bytes, byte, NULL, 0400);
module_param_named(first_id_byte, id_bytes[0], byte, 0400);
module_param_named(second_id_byte, id_bytes[1], byte, 0400);
module_param_named(third_id_byte, id_bytes[2], byte, 0400);
module_param_named(fourth_id_byte, id_bytes[3], byte, 0400);
module_param(access_delay, uint, 0400);
module_param(programm_delay, uint, 0400);
module_param(erase_delay, uint, 0400);
module_param(output_cycle, uint, 0400);
module_param(input_cycle, uint, 0400);
module_param(bus_width, uint, 0400);
module_param(do_delays, uint, 0400);
module_param(log, uint, 0400);
module_param(dbg, uint, 0400);
module_param_array(parts, ulong, &parts_num, 0400);
module_param(badblocks, charp, 0400);
module_param(weakblocks, charp, 0400);
module_param(weakpages, charp, 0400);
module_param(bitflips, uint, 0400);
module_param(gravepages, charp, 0400);
module_param(overridesize, uint, 0400);
module_param(cache_file, charp, 0400);
module_param(bbt, uint, 0400);
module_param(bch, uint, 0400);
MODULE_PARM_DESC(id_bytes, "The ID bytes returned by NAND Flash 'read ID' command");
MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID) (obsolete)");
MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID) (obsolete)");
MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command (obsolete)");
MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command (obsolete)");
MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
MODULE_PARM_DESC(log, "Perform logging if not zero");
MODULE_PARM_DESC(dbg, "Output debug information if not zero");
MODULE_PARM_DESC(parts, "Partition sizes (in erase blocks) separated by commas");
/* Page and erase block positions for the following parameters are independent of any partitions */
MODULE_PARM_DESC(badblocks, "Erase blocks that are initially marked bad, separated by commas");
MODULE_PARM_DESC(weakblocks, "Weak erase blocks [: remaining erase cycles (defaults to 3)]"
" separated by commas e.g. 113:2 means eb 113"
" can be erased only twice before failing");
MODULE_PARM_DESC(weakpages, "Weak pages [: maximum writes (defaults to 3)]"
" separated by commas e.g. 1401:2 means page 1401"
" can be written only twice before failing");
MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (zero by default)");
MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
" separated by commas e.g. 1401:2 means page 1401"
" can be read only twice before failing");
MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
"The size is specified in erase blocks and as the exponent of a power of two"
" e.g. 5 means a size of 32 erase blocks");
MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area");
MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
"be correctable in 512-byte blocks");
/* The largest possible page size */
#define NS_LARGEST_PAGE_SIZE 4096
/* Simulator's output macros (logging, debugging, warning, error) */
#define NS_LOG(args...) \
do { if (log) pr_debug(" log: " args); } while(0)
#define NS_DBG(args...) \
do { if (dbg) pr_debug(" debug: " args); } while(0)
#define NS_WARN(args...) \
do { pr_warn(" warning: " args); } while(0)
#define NS_ERR(args...) \
do { pr_err(" error: " args); } while(0)
#define NS_INFO(args...) \
do { pr_info(" " args); } while(0)
/* Busy-wait delay macros (microseconds, milliseconds) */
#define NS_UDELAY(us) \
do { if (do_delays) udelay(us); } while(0)
#define NS_MDELAY(us) \
do { if (do_delays) mdelay(us); } while(0)
/* Is the nandsim structure initialized? */
#define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0)
/* Good operation completion status */
#define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0)))
/* Operation failed completion status */
#define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns))
/* Calculate the page offset in flash RAM image by (row, column) address */
#define NS_RAW_OFFSET(ns) \
(((ns)->regs.row * (ns)->geom.pgszoob) + (ns)->regs.column)
/* Calculate the OOB offset in flash RAM image by (row, column) address */
#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
/* Calculate the byte shift in the next page to access */
#define NS_PAGE_BYTE_SHIFT(ns) ((ns)->regs.column + (ns)->regs.off)
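/*
 * Worked example (hypothetical geometry): with a 2048-byte page and a
 * 64-byte OOB area, pgszoob = 2112. For row = 3, column = 16:
 *   NS_RAW_OFFSET     = 3 * 2112 + 16 = 6352
 *   NS_RAW_OFFSET_OOB = 6352 + 2048   = 8400
 */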
/* After a command is input, the simulator goes to one of the following states */
#define STATE_CMD_READ0 0x00000001 /* read data from the beginning of page */
#define STATE_CMD_READ1 0x00000002 /* read data from the second half of page */
#define STATE_CMD_READSTART 0x00000003 /* read data second command (large page devices) */
#define STATE_CMD_PAGEPROG 0x00000004 /* start page program */
#define STATE_CMD_READOOB 0x00000005 /* read OOB area */
#define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */
#define STATE_CMD_STATUS 0x00000007 /* read status */
#define STATE_CMD_SEQIN 0x00000009 /* sequential data input */
#define STATE_CMD_READID 0x0000000A /* read ID */
#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */
#define STATE_CMD_RESET 0x0000000C /* reset */
#define STATE_CMD_RNDOUT 0x0000000D /* random output command */
#define STATE_CMD_RNDOUTSTART 0x0000000E /* random output start command */
#define STATE_CMD_MASK 0x0000000F /* command states mask */
/* After an address is input, the simulator goes to one of these states */
#define STATE_ADDR_PAGE 0x00000010 /* full (row, column) address is accepted */
#define STATE_ADDR_SEC 0x00000020 /* sector address was accepted */
#define STATE_ADDR_COLUMN 0x00000030 /* column address was accepted */
#define STATE_ADDR_ZERO 0x00000040 /* one byte zero address was accepted */
#define STATE_ADDR_MASK 0x00000070 /* address states mask */
/* During data input/output the simulator is in these states */
#define STATE_DATAIN 0x00000100 /* waiting for data input */
#define STATE_DATAIN_MASK 0x00000100 /* data input states mask */
#define STATE_DATAOUT 0x00001000 /* waiting for page data output */
#define STATE_DATAOUT_ID 0x00002000 /* waiting for ID bytes output */
#define STATE_DATAOUT_STATUS 0x00003000 /* waiting for status output */
#define STATE_DATAOUT_MASK 0x00007000 /* data output states mask */
/* Previous operation is done, ready to accept new requests */
#define STATE_READY 0x00000000
/* This state is used to mark that the next state isn't known yet */
#define STATE_UNKNOWN 0x10000000
/* Simulator's actions bit masks */
#define ACTION_CPY 0x00100000 /* copy page/OOB to the internal buffer */
#define ACTION_PRGPAGE 0x00200000 /* program the internal buffer to flash */
#define ACTION_SECERASE 0x00300000 /* erase sector */
#define ACTION_ZEROOFF 0x00400000 /* don't add any offset to address */
#define ACTION_HALFOFF 0x00500000 /* add to address half of page */
#define ACTION_OOBOFF 0x00600000 /* add to address OOB offset */
#define ACTION_MASK 0x00700000 /* action mask */
#define NS_OPER_NUM 13 /* Number of operations supported by the simulator */
#define NS_OPER_STATES 6 /* Maximum number of states in operation */
#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */
#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
#define OPT_SMALLPAGE (OPT_PAGE512) /* 512-byte page chips */
/* Remove action bits from state */
#define NS_STATE(x) ((x) & ~ACTION_MASK)
/*
* Maximum previous states which need to be saved. Currently saving is
* only needed for the page program operation preceded by a read command
* (which is only valid for 512-byte pages).
*/
#define NS_MAX_PREVSTATES 1
/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
#define NS_MAX_HELD_PAGES 16
/*
* A union to represent flash memory contents and flash buffer.
*/
union ns_mem {
u_char *byte; /* for byte access */
uint16_t *word; /* for 16-bit word access */
};
/*
* The structure which describes all the internal simulator data.
*/
struct nandsim {
struct nand_chip chip;
struct nand_controller base;
struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
unsigned int nbparts;
uint busw; /* flash chip bus width (8 or 16) */
u_char ids[8]; /* chip's ID bytes */
uint32_t options; /* chip's characteristic bits */
uint32_t state; /* current chip state */
uint32_t nxstate; /* next expected state */
uint32_t *op; /* current operation, NULL if the operation isn't known yet */
uint32_t pstates[NS_MAX_PREVSTATES]; /* previous states */
uint16_t npstates; /* number of previous states saved */
uint16_t stateidx; /* current state index */
/* The simulated NAND flash pages array */
union ns_mem *pages;
/* Slab allocator for nand pages */
struct kmem_cache *nand_pages_slab;
/* Internal buffer of page + OOB size bytes */
union ns_mem buf;
/* NAND flash "geometry" */
struct {
uint64_t totsz; /* total flash size, bytes */
uint32_t secsz; /* flash sector (erase block) size, bytes */
uint pgsz; /* NAND flash page size, bytes */
uint oobsz; /* page OOB area size, bytes */
uint64_t totszoob; /* total flash size including OOB, bytes */
uint pgszoob; /* page size including OOB, bytes */
uint secszoob; /* sector size including OOB, bytes */
uint pgnum; /* total number of pages */
uint pgsec; /* number of pages per sector */
uint secshift; /* bits number in sector size */
uint pgshift; /* bits number in page size */
uint pgaddrbytes; /* bytes per page address */
uint secaddrbytes; /* bytes per sector address */
uint idbytes; /* the number of ID bytes that this chip outputs */
} geom;
/* NAND flash internal registers */
struct {
unsigned command; /* the command register */
u_char status; /* the status register */
uint row; /* the page number */
uint column; /* the offset within page */
uint count; /* internal counter */
uint num; /* number of bytes which must be processed */
uint off; /* fixed page offset */
} regs;
/* NAND flash lines state */
struct {
int ce; /* chip Enable */
int cle; /* command Latch Enable */
int ale; /* address Latch Enable */
int wp; /* write Protect */
} lines;
/* Fields needed when using a cache file */
struct file *cfile; /* Open file */
unsigned long *pages_written; /* Which pages have been written */
void *file_buf;
struct page *held_pages[NS_MAX_HELD_PAGES];
int held_cnt;
/* debugfs entry */
struct dentry *dent;
};
/*
* Operations array. To perform any operation the simulator must pass
* through the corresponding states chain.
*/
static struct nandsim_operations {
uint32_t reqopts; /* options which are required to perform the operation */
uint32_t states[NS_OPER_STATES]; /* operation's states */
} ops[NS_OPER_NUM] = {
/* Read page + OOB from the beginning */
{OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY,
STATE_DATAOUT, STATE_READY}},
/* Read page + OOB from the second half */
{OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY,
STATE_DATAOUT, STATE_READY}},
/* Read OOB */
{OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
STATE_DATAOUT, STATE_READY}},
/* Program page starting from the beginning */
{OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
/* Program page starting from the beginning */
{OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
/* Program page starting from the second half */
{OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
/* Program OOB */
{OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
/* Erase sector */
{OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
/* Read status */
{OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
/* Read ID */
{OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
/* Large page devices read page */
{OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
STATE_DATAOUT, STATE_READY}},
/* Large page devices random page read */
{OPT_LARGEPAGE, {STATE_CMD_RNDOUT, STATE_ADDR_COLUMN, STATE_CMD_RNDOUTSTART | ACTION_CPY,
STATE_DATAOUT, STATE_READY}},
};
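/*
 * Descriptive note (not from the original source): each entry above encodes a
 * states chain that ns_switch_state() walks as command, address and data
 * cycles arrive. For instance, the large page read entry is
 * STATE_CMD_READ0 -> STATE_ADDR_PAGE -> STATE_CMD_READSTART (the page is
 * copied to the internal buffer by ACTION_CPY) -> STATE_DATAOUT ->
 * STATE_READY.
 */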
struct weak_block {
struct list_head list;
unsigned int erase_block_no;
unsigned int max_erases;
unsigned int erases_done;
};
static LIST_HEAD(weak_blocks);
struct weak_page {
struct list_head list;
unsigned int page_no;
unsigned int max_writes;
unsigned int writes_done;
};
static LIST_HEAD(weak_pages);
struct grave_page {
struct list_head list;
unsigned int page_no;
unsigned int max_reads;
unsigned int reads_done;
};
static LIST_HEAD(grave_pages);
static unsigned long *erase_block_wear = NULL;
static unsigned int wear_eb_count = 0;
static unsigned long total_wear = 0;
/* MTD structure for NAND controller */
static struct mtd_info *nsmtd;
static int ns_show(struct seq_file *m, void *private)
{
unsigned long wmin = -1, wmax = 0, avg;
unsigned long deciles[10], decile_max[10], tot = 0;
unsigned int i;
/* Calc wear stats */
for (i = 0; i < wear_eb_count; ++i) {
unsigned long wear = erase_block_wear[i];
if (wear < wmin)
wmin = wear;
if (wear > wmax)
wmax = wear;
tot += wear;
}
for (i = 0; i < 9; ++i) {
deciles[i] = 0;
decile_max[i] = (wmax * (i + 1) + 5) / 10;
}
deciles[9] = 0;
decile_max[9] = wmax;
for (i = 0; i < wear_eb_count; ++i) {
int d;
unsigned long wear = erase_block_wear[i];
for (d = 0; d < 10; ++d)
if (wear <= decile_max[d]) {
deciles[d] += 1;
break;
}
}
avg = tot / wear_eb_count;
/* Output wear report */
seq_printf(m, "Total numbers of erases: %lu\n", tot);
seq_printf(m, "Number of erase blocks: %u\n", wear_eb_count);
seq_printf(m, "Average number of erases: %lu\n", avg);
seq_printf(m, "Maximum number of erases: %lu\n", wmax);
seq_printf(m, "Minimum number of erases: %lu\n", wmin);
for (i = 0; i < 10; ++i) {
unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
if (from > decile_max[i])
continue;
seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n",
from,
decile_max[i],
deciles[i]);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ns);
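/*
 * Decile bucket example (hypothetical wear figures): with wmax = 97 erases,
 * decile_max[0] = (97 * 1 + 5) / 10 = 10, decile_max[4] = (97 * 5 + 5) / 10 = 49
 * and decile_max[9] = 97, so an erase block worn 45 times is counted in the
 * fifth bucket of the report printed by ns_show() above.
 */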
/**
* ns_debugfs_create - initialize debugfs
* @ns: nandsim device description object
*
* This function creates all debugfs files for nandsim device @ns. Returns zero in
* case of success and a negative error code in case of failure.
*/
static int ns_debugfs_create(struct nandsim *ns)
{
struct dentry *root = nsmtd->dbg.dfs_dir;
/*
* Just skip debugfs initialization when the debugfs directory is
* missing.
*/
if (IS_ERR_OR_NULL(root)) {
if (IS_ENABLED(CONFIG_DEBUG_FS) &&
!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
return 0;
}
ns->dent = debugfs_create_file("nandsim_wear_report", 0400, root, ns,
&ns_fops);
if (IS_ERR_OR_NULL(ns->dent)) {
NS_ERR("cannot create \"nandsim_wear_report\" debugfs entry\n");
return -1;
}
return 0;
}
static void ns_debugfs_remove(struct nandsim *ns)
{
debugfs_remove_recursive(ns->dent);
}
/*
* Allocate the array of page pointers, create a slab cache for page
* allocations and initialize the array with NULL pointers.
*
* RETURNS: 0 if success, -ENOMEM if memory alloc fails.
*/
static int __init ns_alloc_device(struct nandsim *ns)
{
struct file *cfile;
int i, err;
if (cache_file) {
cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
if (IS_ERR(cfile))
return PTR_ERR(cfile);
if (!(cfile->f_mode & FMODE_CAN_READ)) {
NS_ERR("alloc_device: cache file not readable\n");
err = -EINVAL;
goto err_close_filp;
}
if (!(cfile->f_mode & FMODE_CAN_WRITE)) {
NS_ERR("alloc_device: cache file not writeable\n");
err = -EINVAL;
goto err_close_filp;
}
ns->pages_written =
vzalloc(array_size(sizeof(unsigned long),
BITS_TO_LONGS(ns->geom.pgnum)));
if (!ns->pages_written) {
NS_ERR("alloc_device: unable to allocate pages written array\n");
err = -ENOMEM;
goto err_close_filp;
}
ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
if (!ns->file_buf) {
NS_ERR("alloc_device: unable to allocate file buf\n");
err = -ENOMEM;
goto err_free_pw;
}
ns->cfile = cfile;
return 0;
err_free_pw:
vfree(ns->pages_written);
err_close_filp:
filp_close(cfile, NULL);
return err;
}
ns->pages = vmalloc(array_size(sizeof(union ns_mem), ns->geom.pgnum));
if (!ns->pages) {
NS_ERR("alloc_device: unable to allocate page array\n");
return -ENOMEM;
}
for (i = 0; i < ns->geom.pgnum; i++) {
ns->pages[i].byte = NULL;
}
ns->nand_pages_slab = kmem_cache_create("nandsim",
ns->geom.pgszoob, 0, 0, NULL);
if (!ns->nand_pages_slab) {
NS_ERR("cache_create: unable to create kmem_cache\n");
err = -ENOMEM;
goto err_free_pg;
}
return 0;
err_free_pg:
vfree(ns->pages);
return err;
}
/*
* Free any allocated pages, and free the array of page pointers.
*/
static void ns_free_device(struct nandsim *ns)
{
int i;
if (ns->cfile) {
kfree(ns->file_buf);
vfree(ns->pages_written);
filp_close(ns->cfile, NULL);
return;
}
if (ns->pages) {
for (i = 0; i < ns->geom.pgnum; i++) {
if (ns->pages[i].byte)
kmem_cache_free(ns->nand_pages_slab,
ns->pages[i].byte);
}
kmem_cache_destroy(ns->nand_pages_slab);
vfree(ns->pages);
}
}
static char __init *ns_get_partition_name(int i)
{
return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
}
/*
* Initialize the nandsim structure.
*
* RETURNS: 0 if success, -ERRNO if failure.
*/
static int __init ns_init(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nandsim *ns = nand_get_controller_data(chip);
int i, ret = 0;
uint64_t remains;
uint64_t next_offset;
if (NS_IS_INITIALIZED(ns)) {
NS_ERR("init_nandsim: nandsim is already initialized\n");
return -EIO;
}
/* Initialize the NAND flash parameters */
ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
ns->geom.totsz = mtd->size;
ns->geom.pgsz = mtd->writesize;
ns->geom.oobsz = mtd->oobsize;
ns->geom.secsz = mtd->erasesize;
ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
ns->geom.secshift = ffs(ns->geom.secsz) - 1;
ns->geom.pgshift = chip->page_shift;
ns->geom.pgsec = ns->geom.secsz / ns->geom.pgsz;
ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
ns->options = 0;
if (ns->geom.pgsz == 512) {
ns->options |= OPT_PAGE512;
if (ns->busw == 8)
ns->options |= OPT_PAGE512_8BIT;
} else if (ns->geom.pgsz == 2048) {
ns->options |= OPT_PAGE2048;
} else if (ns->geom.pgsz == 4096) {
ns->options |= OPT_PAGE4096;
} else {
NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
return -EIO;
}
if (ns->options & OPT_SMALLPAGE) {
if (ns->geom.totsz <= (32 << 20)) {
ns->geom.pgaddrbytes = 3;
ns->geom.secaddrbytes = 2;
} else {
ns->geom.pgaddrbytes = 4;
ns->geom.secaddrbytes = 3;
}
} else {
if (ns->geom.totsz <= (128 << 20)) {
ns->geom.pgaddrbytes = 4;
ns->geom.secaddrbytes = 2;
} else {
ns->geom.pgaddrbytes = 5;
ns->geom.secaddrbytes = 3;
}
}
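/*
 * Geometry example (hypothetical chip, not a default): a 256 MiB device with
 * 2048-byte pages, 64-byte OOB and 128 KiB erase blocks gives pgszoob = 2112,
 * pgnum = 131072, pgsec = 64, secszoob = 135168, secshift = 17, pgshift = 11,
 * and, since totsz > 128 MiB on a large-page chip, pgaddrbytes = 5 and
 * secaddrbytes = 3.
 */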
/* Fill the partition_info structure */
if (parts_num > ARRAY_SIZE(ns->partitions)) {
NS_ERR("too many partitions.\n");
return -EINVAL;
}
remains = ns->geom.totsz;
next_offset = 0;
for (i = 0; i < parts_num; ++i) {
uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;
if (!part_sz || part_sz > remains) {
NS_ERR("bad partition size.\n");
return -EINVAL;
}
ns->partitions[i].name = ns_get_partition_name(i);
if (!ns->partitions[i].name) {
NS_ERR("unable to allocate memory.\n");
return -ENOMEM;
}
ns->partitions[i].offset = next_offset;
ns->partitions[i].size = part_sz;
next_offset += ns->partitions[i].size;
remains -= ns->partitions[i].size;
}
ns->nbparts = parts_num;
if (remains) {
if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
NS_ERR("too many partitions.\n");
ret = -EINVAL;
goto free_partition_names;
}
ns->partitions[i].name = ns_get_partition_name(i);
if (!ns->partitions[i].name) {
NS_ERR("unable to allocate memory.\n");
ret = -ENOMEM;
goto free_partition_names;
}
ns->partitions[i].offset = next_offset;
ns->partitions[i].size = remains;
ns->nbparts += 1;
}
if (ns->busw == 16)
NS_WARN("16-bit flashes support wasn't tested\n");
printk("flash size: %llu MiB\n",
(unsigned long long)ns->geom.totsz >> 20);
printk("page size: %u bytes\n", ns->geom.pgsz);
printk("OOB area size: %u bytes\n", ns->geom.oobsz);
printk("sector size: %u KiB\n", ns->geom.secsz >> 10);
printk("pages number: %u\n", ns->geom.pgnum);
printk("pages per sector: %u\n", ns->geom.pgsec);
printk("bus width: %u\n", ns->busw);
printk("bits in sector size: %u\n", ns->geom.secshift);
printk("bits in page size: %u\n", ns->geom.pgshift);
printk("bits in OOB size: %u\n", ffs(ns->geom.oobsz) - 1);
printk("flash size with OOB: %llu KiB\n",
(unsigned long long)ns->geom.totszoob >> 10);
printk("page address bytes: %u\n", ns->geom.pgaddrbytes);
printk("sector address bytes: %u\n", ns->geom.secaddrbytes);
printk("options: %#x\n", ns->options);
ret = ns_alloc_device(ns);
if (ret)
goto free_partition_names;
/* Allocate / initialize the internal buffer */
ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
if (!ns->buf.byte) {
NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
ns->geom.pgszoob);
ret = -ENOMEM;
goto free_device;
}
memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
return 0;
free_device:
ns_free_device(ns);
free_partition_names:
for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
kfree(ns->partitions[i].name);
return ret;
}
/*
* Free the nandsim structure.
*/
static void ns_free(struct nandsim *ns)
{
int i;
for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
kfree(ns->partitions[i].name);
kfree(ns->buf.byte);
ns_free_device(ns);
return;
}
static int ns_parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
{
char *w;
int zero_ok;
unsigned int erase_block_no;
loff_t offset;
if (!badblocks)
return 0;
w = badblocks;
do {
zero_ok = (*w == '0' ? 1 : 0);
erase_block_no = simple_strtoul(w, &w, 0);
if (!zero_ok && !erase_block_no) {
NS_ERR("invalid badblocks.\n");
return -EINVAL;
}
offset = (loff_t)erase_block_no * ns->geom.secsz;
if (mtd_block_markbad(mtd, offset)) {
NS_ERR("invalid badblocks.\n");
return -EINVAL;
}
if (*w == ',')
w += 1;
} while (*w);
return 0;
}
static int ns_parse_weakblocks(void)
{
char *w;
int zero_ok;
unsigned int erase_block_no;
unsigned int max_erases;
struct weak_block *wb;
if (!weakblocks)
return 0;
w = weakblocks;
do {
zero_ok = (*w == '0' ? 1 : 0);
erase_block_no = simple_strtoul(w, &w, 0);
if (!zero_ok && !erase_block_no) {
NS_ERR("invalid weakblocks.\n");
return -EINVAL;
}
max_erases = 3;
if (*w == ':') {
w += 1;
max_erases = simple_strtoul(w, &w, 0);
}
if (*w == ',')
w += 1;
wb = kzalloc(sizeof(*wb), GFP_KERNEL);
if (!wb) {
NS_ERR("unable to allocate memory.\n");
return -ENOMEM;
}
wb->erase_block_no = erase_block_no;
wb->max_erases = max_erases;
list_add(&wb->list, &weak_blocks);
} while (*w);
return 0;
}
static int ns_erase_error(unsigned int erase_block_no)
{
struct weak_block *wb;
list_for_each_entry(wb, &weak_blocks, list)
if (wb->erase_block_no == erase_block_no) {
if (wb->erases_done >= wb->max_erases)
return 1;
wb->erases_done += 1;
return 0;
}
return 0;
}
static int ns_parse_weakpages(void)
{
char *w;
int zero_ok;
unsigned int page_no;
unsigned int max_writes;
struct weak_page *wp;
if (!weakpages)
return 0;
w = weakpages;
do {
zero_ok = (*w == '0' ? 1 : 0);
page_no = simple_strtoul(w, &w, 0);
if (!zero_ok && !page_no) {
NS_ERR("invalid weakpages.\n");
return -EINVAL;
}
max_writes = 3;
if (*w == ':') {
w += 1;
max_writes = simple_strtoul(w, &w, 0);
}
if (*w == ',')
w += 1;
wp = kzalloc(sizeof(*wp), GFP_KERNEL);
if (!wp) {
NS_ERR("unable to allocate memory.\n");
return -ENOMEM;
}
wp->page_no = page_no;
wp->max_writes = max_writes;
list_add(&wp->list, &weak_pages);
} while (*w);
return 0;
}
static int ns_write_error(unsigned int page_no)
{
struct weak_page *wp;
list_for_each_entry(wp, &weak_pages, list)
if (wp->page_no == page_no) {
if (wp->writes_done >= wp->max_writes)
return 1;
wp->writes_done += 1;
return 0;
}
return 0;
}
static int ns_parse_gravepages(void)
{
char *g;
int zero_ok;
unsigned int page_no;
unsigned int max_reads;
struct grave_page *gp;
if (!gravepages)
return 0;
g = gravepages;
do {
zero_ok = (*g == '0' ? 1 : 0);
page_no = simple_strtoul(g, &g, 0);
if (!zero_ok && !page_no) {
NS_ERR("invalid gravepagess.\n");
return -EINVAL;
}
max_reads = 3;
if (*g == ':') {
g += 1;
max_reads = simple_strtoul(g, &g, 0);
}
if (*g == ',')
g += 1;
gp = kzalloc(sizeof(*gp), GFP_KERNEL);
if (!gp) {
NS_ERR("unable to allocate memory.\n");
return -ENOMEM;
}
gp->page_no = page_no;
gp->max_reads = max_reads;
list_add(&gp->list, &grave_pages);
} while (*g);
return 0;
}
static int ns_read_error(unsigned int page_no)
{
struct grave_page *gp;
list_for_each_entry(gp, &grave_pages, list)
if (gp->page_no == page_no) {
if (gp->reads_done >= gp->max_reads)
return 1;
gp->reads_done += 1;
return 0;
}
return 0;
}
static int ns_setup_wear_reporting(struct mtd_info *mtd)
{
wear_eb_count = div_u64(mtd->size, mtd->erasesize);
erase_block_wear = kcalloc(wear_eb_count, sizeof(unsigned long), GFP_KERNEL);
if (!erase_block_wear) {
NS_ERR("Too many erase blocks for wear reporting\n");
return -ENOMEM;
}
return 0;
}
static void ns_update_wear(unsigned int erase_block_no)
{
if (!erase_block_wear)
return;
total_wear += 1;
/*
* TODO: Notify this through a debugfs entry,
* instead of showing an error message.
*/
if (total_wear == 0)
NS_ERR("Erase counter total overflow\n");
erase_block_wear[erase_block_no] += 1;
if (erase_block_wear[erase_block_no] == 0)
NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
}
/*
* Returns the string representation of the given state.
*/
static char *ns_get_state_name(uint32_t state)
{
switch (NS_STATE(state)) {
case STATE_CMD_READ0:
return "STATE_CMD_READ0";
case STATE_CMD_READ1:
return "STATE_CMD_READ1";
case STATE_CMD_PAGEPROG:
return "STATE_CMD_PAGEPROG";
case STATE_CMD_READOOB:
return "STATE_CMD_READOOB";
case STATE_CMD_READSTART:
return "STATE_CMD_READSTART";
case STATE_CMD_ERASE1:
return "STATE_CMD_ERASE1";
case STATE_CMD_STATUS:
return "STATE_CMD_STATUS";
case STATE_CMD_SEQIN:
return "STATE_CMD_SEQIN";
case STATE_CMD_READID:
return "STATE_CMD_READID";
case STATE_CMD_ERASE2:
return "STATE_CMD_ERASE2";
case STATE_CMD_RESET:
return "STATE_CMD_RESET";
case STATE_CMD_RNDOUT:
return "STATE_CMD_RNDOUT";
case STATE_CMD_RNDOUTSTART:
return "STATE_CMD_RNDOUTSTART";
case STATE_ADDR_PAGE:
return "STATE_ADDR_PAGE";
case STATE_ADDR_SEC:
return "STATE_ADDR_SEC";
case STATE_ADDR_ZERO:
return "STATE_ADDR_ZERO";
case STATE_ADDR_COLUMN:
return "STATE_ADDR_COLUMN";
case STATE_DATAIN:
return "STATE_DATAIN";
case STATE_DATAOUT:
return "STATE_DATAOUT";
case STATE_DATAOUT_ID:
return "STATE_DATAOUT_ID";
case STATE_DATAOUT_STATUS:
return "STATE_DATAOUT_STATUS";
case STATE_READY:
return "STATE_READY";
case STATE_UNKNOWN:
return "STATE_UNKNOWN";
}
NS_ERR("get_state_name: unknown state, BUG\n");
return NULL;
}
/*
* Check if command is valid.
*
* RETURNS: 1 if wrong command, 0 if right.
*/
static int ns_check_command(int cmd)
{
switch (cmd) {
case NAND_CMD_READ0:
case NAND_CMD_READ1:
case NAND_CMD_READSTART:
case NAND_CMD_PAGEPROG:
case NAND_CMD_READOOB:
case NAND_CMD_ERASE1:
case NAND_CMD_STATUS:
case NAND_CMD_SEQIN:
case NAND_CMD_READID:
case NAND_CMD_ERASE2:
case NAND_CMD_RESET:
case NAND_CMD_RNDOUT:
case NAND_CMD_RNDOUTSTART:
return 0;
default:
return 1;
}
}
/*
* Returns the state the simulator enters after the given command is accepted.
*/
static uint32_t ns_get_state_by_command(unsigned command)
{
switch (command) {
case NAND_CMD_READ0:
return STATE_CMD_READ0;
case NAND_CMD_READ1:
return STATE_CMD_READ1;
case NAND_CMD_PAGEPROG:
return STATE_CMD_PAGEPROG;
case NAND_CMD_READSTART:
return STATE_CMD_READSTART;
case NAND_CMD_READOOB:
return STATE_CMD_READOOB;
case NAND_CMD_ERASE1:
return STATE_CMD_ERASE1;
case NAND_CMD_STATUS:
return STATE_CMD_STATUS;
case NAND_CMD_SEQIN:
return STATE_CMD_SEQIN;
case NAND_CMD_READID:
return STATE_CMD_READID;
case NAND_CMD_ERASE2:
return STATE_CMD_ERASE2;
case NAND_CMD_RESET:
return STATE_CMD_RESET;
case NAND_CMD_RNDOUT:
return STATE_CMD_RNDOUT;
case NAND_CMD_RNDOUTSTART:
return STATE_CMD_RNDOUTSTART;
}
NS_ERR("get_state_by_command: unknown command, BUG\n");
return 0;
}
/*
* Move an address byte to the corresponding internal register.
*/
static inline void ns_accept_addr_byte(struct nandsim *ns, u_char bt)
{
uint byte = (uint)bt;
if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes))
ns->regs.column |= (byte << 8 * ns->regs.count);
else {
ns->regs.row |= (byte << 8 * (ns->regs.count -
ns->geom.pgaddrbytes +
ns->geom.secaddrbytes));
}
return;
}
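/*
 * Address decoding example (hypothetical 5-byte address cycle with
 * pgaddrbytes = 5, secaddrbytes = 3): input bytes 0x10 0x00 0x02 0x01 0x00
 * give column = 0x10 | 0x00 << 8 = 16 and row = 0x02 | 0x01 << 8 |
 * 0x00 << 16 = 258, matching the little-endian per-byte shifts above.
 */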
/*
* Switch to STATE_READY state.
*/
static inline void ns_switch_to_ready_state(struct nandsim *ns, u_char status)
{
NS_DBG("switch_to_ready_state: switch to %s state\n",
ns_get_state_name(STATE_READY));
ns->state = STATE_READY;
ns->nxstate = STATE_UNKNOWN;
ns->op = NULL;
ns->npstates = 0;
ns->stateidx = 0;
ns->regs.num = 0;
ns->regs.count = 0;
ns->regs.off = 0;
ns->regs.row = 0;
ns->regs.column = 0;
ns->regs.status = status;
}
/*
* If the operation isn't known yet, try to find it in the global array
* of supported operations.
*
* Operation can be unknown because of the following.
* 1. New command was accepted and this is the first call to find the
* corresponding states chain. In this case ns->npstates = 0;
* 2. There are several operations which begin with the same command(s)
* (for example program from the second half and read from the
* second half operations both begin with the READ1 command). In this
* case the ns->pstates[] array contains previous states.
*
* Thus, the function tries to find operation containing the following
* states (if the 'flag' parameter is 0):
* ns->pstates[0], ... ns->pstates[ns->npstates], ns->state
*
* If one (and only one) matching operation is found, it is accepted
* (ns->op, ns->state, ns->nxstate are initialized, ns->npstates is
* zeroed).
*
* If there are several matches, the current state is pushed to the
* ns->pstates.
*
* The operation can be unknown only while commands are input to the chip.
* As soon as an address is accepted, the operation must be known.
* In such situation the function is called with 'flag' != 0, and the
* operation is searched using the following pattern:
* ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
*
* It is supposed that this pattern must either match one operation or
* none. There can't be ambiguity in that case.
*
* If no matches found, the function does the following:
* 1. if there are saved states present, try to ignore them and search
* again only using the last command. If nothing was found, switch
* to the STATE_READY state.
* 2. if there are no saved states, switch to the STATE_READY state.
*
* RETURNS: -2 - no matched operations found.
* -1 - several matches.
* 0 - operation is found.
*/
static int ns_find_operation(struct nandsim *ns, uint32_t flag)
{
int opsfound = 0;
int i, j, idx = 0;
for (i = 0; i < NS_OPER_NUM; i++) {
int found = 1;
if (!(ns->options & ops[i].reqopts))
/* Ignore operations we can't perform */
continue;
if (flag) {
if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK))
continue;
} else {
if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates]))
continue;
}
for (j = 0; j < ns->npstates; j++)
if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j])
&& (ns->options & ops[idx].reqopts)) {
found = 0;
break;
}
if (found) {
idx = i;
opsfound += 1;
}
}
if (opsfound == 1) {
/* Exact match */
ns->op = &ops[idx].states[0];
if (flag) {
/*
* In this case the find_operation function was
* called when the address input had just begun. The address
* isn't fully input yet, so the current state must
* not be one of STATE_ADDR_*; instead, the STATE_ADDR_*
* state must be the next state (ns->nxstate).
*/
ns->stateidx = ns->npstates - 1;
} else {
ns->stateidx = ns->npstates;
}
ns->npstates = 0;
ns->state = ns->op[ns->stateidx];
ns->nxstate = ns->op[ns->stateidx + 1];
NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
idx, ns_get_state_name(ns->state),
ns_get_state_name(ns->nxstate));
return 0;
}
if (opsfound == 0) {
/* Nothing was found. Try to ignore previous commands (if any) and search again */
if (ns->npstates != 0) {
NS_DBG("find_operation: no operation found, try again with state %s\n",
ns_get_state_name(ns->state));
ns->npstates = 0;
return ns_find_operation(ns, 0);
}
NS_DBG("find_operation: no operations found\n");
ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return -2;
}
if (flag) {
/* This shouldn't happen */
NS_DBG("find_operation: BUG, operation must be known if address is input\n");
return -2;
}
NS_DBG("find_operation: there is still ambiguity\n");
ns->pstates[ns->npstates++] = ns->state;
return -1;
}
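/*
 * Ambiguity example, following the comment above: on a small-page chip a
 * READ1 command starts both the "read from the second half" and the
 * "program from the second half" operations, so the first call finds two
 * matches, returns -1 and pushes STATE_CMD_READ1 to ns->pstates[]; the
 * subsequent SEQIN command (or the address input) then selects exactly one
 * states chain.
 */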
static void ns_put_pages(struct nandsim *ns)
{
int i;
for (i = 0; i < ns->held_cnt; i++)
put_page(ns->held_pages[i]);
}
/* Get page cache pages in advance to provide NOFS memory allocation */
static int ns_get_pages(struct nandsim *ns, struct file *file, size_t count,
loff_t pos)
{
pgoff_t index, start_index, end_index;
struct page *page;
struct address_space *mapping = file->f_mapping;
start_index = pos >> PAGE_SHIFT;
end_index = (pos + count - 1) >> PAGE_SHIFT;
if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
return -EINVAL;
ns->held_cnt = 0;
for (index = start_index; index <= end_index; index++) {
page = find_get_page(mapping, index);
if (page == NULL) {
page = find_or_create_page(mapping, index, GFP_NOFS);
if (page == NULL) {
write_inode_now(mapping->host, 1);
page = find_or_create_page(mapping, index, GFP_NOFS);
}
if (page == NULL) {
ns_put_pages(ns);
return -ENOMEM;
}
unlock_page(page);
}
ns->held_pages[ns->held_cnt++] = page;
}
return 0;
}
static ssize_t ns_read_file(struct nandsim *ns, struct file *file, void *buf,
size_t count, loff_t pos)
{
ssize_t tx;
int err;
unsigned int noreclaim_flag;
err = ns_get_pages(ns, file, count, pos);
if (err)
return err;
noreclaim_flag = memalloc_noreclaim_save();
tx = kernel_read(file, buf, count, &pos);
memalloc_noreclaim_restore(noreclaim_flag);
ns_put_pages(ns);
return tx;
}
static ssize_t ns_write_file(struct nandsim *ns, struct file *file, void *buf,
size_t count, loff_t pos)
{
ssize_t tx;
int err;
unsigned int noreclaim_flag;
err = ns_get_pages(ns, file, count, pos);
if (err)
return err;
noreclaim_flag = memalloc_noreclaim_save();
tx = kernel_write(file, buf, count, &pos);
memalloc_noreclaim_restore(noreclaim_flag);
ns_put_pages(ns);
return tx;
}
/*
* Returns a pointer to the current page.
*/
static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
{
return &(ns->pages[ns->regs.row]);
}
/*
* Returns a pointer to the current byte within the current page.
*/
static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
{
return NS_GET_PAGE(ns)->byte + NS_PAGE_BYTE_SHIFT(ns);
}
static int ns_do_read_error(struct nandsim *ns, int num)
{
unsigned int page_no = ns->regs.row;
if (ns_read_error(page_no)) {
get_random_bytes(ns->buf.byte, num);
NS_WARN("simulating read error in page %u\n", page_no);
return 1;
}
return 0;
}
static void ns_do_bit_flips(struct nandsim *ns, int num)
{
if (bitflips && get_random_u16() < (1 << 6)) {
int flips = 1;
if (bitflips > 1)
flips = get_random_u32_inclusive(1, bitflips);
while (flips--) {
int pos = get_random_u32_below(num * 8);
ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
NS_WARN("read_page: flipping bit %d in page %d "
"reading from %d ecc: corrected=%u failed=%u\n",
pos, ns->regs.row, NS_PAGE_BYTE_SHIFT(ns),
nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
}
}
}
/*
* Fill the NAND buffer with data read from the specified page.
*/
static void ns_read_page(struct nandsim *ns, int num)
{
union ns_mem *mypage;
if (ns->cfile) {
if (!test_bit(ns->regs.row, ns->pages_written)) {
NS_DBG("read_page: page %d not written\n", ns->regs.row);
memset(ns->buf.byte, 0xFF, num);
} else {
loff_t pos;
ssize_t tx;
NS_DBG("read_page: page %d written, reading from %d\n",
ns->regs.row, NS_PAGE_BYTE_SHIFT(ns));
if (ns_do_read_error(ns, num))
return;
pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
tx = ns_read_file(ns, ns->cfile, ns->buf.byte, num,
pos);
if (tx != num) {
NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
return;
}
ns_do_bit_flips(ns, num);
}
return;
}
mypage = NS_GET_PAGE(ns);
if (mypage->byte == NULL) {
NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
memset(ns->buf.byte, 0xFF, num);
} else {
NS_DBG("read_page: page %d allocated, reading from %d\n",
ns->regs.row, NS_PAGE_BYTE_SHIFT(ns));
if (ns_do_read_error(ns, num))
return;
memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
ns_do_bit_flips(ns, num);
}
}
/*
* Erase all pages in the specified sector.
*/
static void ns_erase_sector(struct nandsim *ns)
{
union ns_mem *mypage;
int i;
if (ns->cfile) {
for (i = 0; i < ns->geom.pgsec; i++)
if (__test_and_clear_bit(ns->regs.row + i,
ns->pages_written)) {
NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
}
return;
}
mypage = NS_GET_PAGE(ns);
for (i = 0; i < ns->geom.pgsec; i++) {
if (mypage->byte != NULL) {
NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
kmem_cache_free(ns->nand_pages_slab, mypage->byte);
mypage->byte = NULL;
}
mypage++;
}
}
/*
* Program the specified page with the contents from the NAND buffer.
*/
static int ns_prog_page(struct nandsim *ns, int num)
{
int i;
union ns_mem *mypage;
u_char *pg_off;
if (ns->cfile) {
loff_t off;
ssize_t tx;
int all;
NS_DBG("prog_page: writing page %d\n", ns->regs.row);
pg_off = ns->file_buf + NS_PAGE_BYTE_SHIFT(ns);
off = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
if (!test_bit(ns->regs.row, ns->pages_written)) {
all = 1;
memset(ns->file_buf, 0xff, ns->geom.pgszoob);
} else {
all = 0;
tx = ns_read_file(ns, ns->cfile, pg_off, num, off);
if (tx != num) {
NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
}
}
for (i = 0; i < num; i++)
pg_off[i] &= ns->buf.byte[i];
if (all) {
loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
tx = ns_write_file(ns, ns->cfile, ns->file_buf,
ns->geom.pgszoob, pos);
if (tx != ns->geom.pgszoob) {
NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
}
__set_bit(ns->regs.row, ns->pages_written);
} else {
tx = ns_write_file(ns, ns->cfile, pg_off, num, off);
if (tx != num) {
NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
}
}
return 0;
}
mypage = NS_GET_PAGE(ns);
if (mypage->byte == NULL) {
NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
/*
* We allocate memory with GFP_NOFS because a flash FS may
* utilize this. If it is holding an FS lock, then gets here,
* then kernel memory alloc runs writeback which goes to the FS
* again and deadlocks. This was seen in practice.
*/
mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
if (mypage->byte == NULL) {
NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
return -1;
}
memset(mypage->byte, 0xFF, ns->geom.pgszoob);
}
pg_off = NS_PAGE_BYTE_OFF(ns);
for (i = 0; i < num; i++)
pg_off[i] &= ns->buf.byte[i];
return 0;
}
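/*
 * Note on ns_prog_page(): like real NAND, programming only clears bits,
 * hence the AND of the page contents with the internal buffer. For example,
 * a fresh 0xFF byte programmed with 0xA5 becomes 0xA5, and programming that
 * same byte again with 0x5A leaves 0x00 (0xA5 & 0x5A); only an erase
 * restores it to 0xFF.
 */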
/*
* If state has any action bit, perform this action.
*
* RETURNS: 0 if success, -1 if error.
*/
static int ns_do_state_action(struct nandsim *ns, uint32_t action)
{
int num;
int busdiv = ns->busw == 8 ? 1 : 2;
unsigned int erase_block_no, page_no;
action &= ACTION_MASK;
/* Check that page address input is correct */
if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) {
NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row);
return -1;
}
switch (action) {
case ACTION_CPY:
/*
* Copy page data to the internal buffer.
*/
/* Column shouldn't be very large */
if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) {
NS_ERR("do_state_action: column number is too large\n");
break;
}
num = ns->geom.pgszoob - NS_PAGE_BYTE_SHIFT(ns);
ns_read_page(ns, num);
NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
num, NS_RAW_OFFSET(ns) + ns->regs.off);
if (ns->regs.off == 0)
NS_LOG("read page %d\n", ns->regs.row);
else if (ns->regs.off < ns->geom.pgsz)
NS_LOG("read page %d (second half)\n", ns->regs.row);
else
NS_LOG("read OOB of page %d\n", ns->regs.row);
NS_UDELAY(access_delay);
NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv);
break;
case ACTION_SECERASE:
/*
* Erase sector.
*/
if (ns->lines.wp) {
NS_ERR("do_state_action: device is write-protected, ignore sector erase\n");
return -1;
}
if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec
|| (ns->regs.row & ~(ns->geom.secsz - 1))) {
NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row);
return -1;
}
ns->regs.row = (ns->regs.row <<
8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
ns->regs.column = 0;
erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift);
NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
ns->regs.row, NS_RAW_OFFSET(ns));
NS_LOG("erase sector %u\n", erase_block_no);
ns_erase_sector(ns);
NS_MDELAY(erase_delay);
if (erase_block_wear)
ns_update_wear(erase_block_no);
if (ns_erase_error(erase_block_no)) {
NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
return -1;
}
break;
case ACTION_PRGPAGE:
/*
* Program page - move internal buffer data to the page.
*/
if (ns->lines.wp) {
NS_WARN("do_state_action: device is write-protected, programm\n");
return -1;
}
num = ns->geom.pgszoob - NS_PAGE_BYTE_SHIFT(ns);
if (num != ns->regs.count) {
NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n",
ns->regs.count, num);
return -1;
}
if (ns_prog_page(ns, num) == -1)
return -1;
page_no = ns->regs.row;
NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
NS_LOG("programm page %d\n", ns->regs.row);
NS_UDELAY(programm_delay);
NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
if (ns_write_error(page_no)) {
NS_WARN("simulating write failure in page %u\n", page_no);
return -1;
}
break;
case ACTION_ZEROOFF:
NS_DBG("do_state_action: set internal offset to 0\n");
ns->regs.off = 0;
break;
case ACTION_HALFOFF:
if (!(ns->options & OPT_PAGE512_8BIT)) {
NS_ERR("do_state_action: BUG! can't skip half of page for non-512"
"byte page size 8x chips\n");
return -1;
}
NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2);
ns->regs.off = ns->geom.pgsz/2;
break;
case ACTION_OOBOFF:
NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz);
ns->regs.off = ns->geom.pgsz;
break;
default:
NS_DBG("do_state_action: BUG! unknown action\n");
}
return 0;
}
/*
* Switch simulator's state.
*/
static void ns_switch_state(struct nandsim *ns)
{
if (ns->op) {
/*
* The current operation have already been identified.
* Just follow the states chain.
*/
ns->stateidx += 1;
ns->state = ns->nxstate;
ns->nxstate = ns->op[ns->stateidx + 1];
NS_DBG("switch_state: operation is known, switch to the next state, "
"state: %s, nxstate: %s\n",
ns_get_state_name(ns->state),
ns_get_state_name(ns->nxstate));
} else {
/*
* We don't yet know which operation we perform.
* Try to identify it.
*/
/*
* The only event causing the switch_state function to
* be called with yet unknown operation is new command.
*/
ns->state = ns_get_state_by_command(ns->regs.command);
NS_DBG("switch_state: operation is unknown, try to find it\n");
if (ns_find_operation(ns, 0))
return;
}
/* See, whether we need to do some action */
if ((ns->state & ACTION_MASK) &&
ns_do_state_action(ns, ns->state) < 0) {
ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
/* For 16x devices column means the page offset in words */
if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) {
NS_DBG("switch_state: double the column number for 16x device\n");
ns->regs.column <<= 1;
}
if (NS_STATE(ns->nxstate) == STATE_READY) {
/*
* The current state is the last. Return to STATE_READY
*/
u_char status = NS_STATUS_OK(ns);
/* In case of data states, see if all bytes were input/output */
if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK))
&& ns->regs.count != ns->regs.num) {
NS_WARN("switch_state: not all bytes were processed, %d left\n",
ns->regs.num - ns->regs.count);
status = NS_STATUS_FAILED(ns);
}
NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
ns_switch_to_ready_state(ns, status);
return;
} else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
/*
* If the next state is data input/output, switch to it now
*/
ns->state = ns->nxstate;
ns->nxstate = ns->op[++ns->stateidx + 1];
ns->regs.num = ns->regs.count = 0;
NS_DBG("switch_state: the next state is data I/O, switch, "
"state: %s, nxstate: %s\n",
ns_get_state_name(ns->state),
ns_get_state_name(ns->nxstate));
/*
* Set the internal register to the count of bytes which
* are expected to be input or output
*/
switch (NS_STATE(ns->state)) {
case STATE_DATAIN:
case STATE_DATAOUT:
ns->regs.num = ns->geom.pgszoob - NS_PAGE_BYTE_SHIFT(ns);
break;
case STATE_DATAOUT_ID:
ns->regs.num = ns->geom.idbytes;
break;
case STATE_DATAOUT_STATUS:
ns->regs.count = ns->regs.num = 0;
break;
default:
NS_ERR("switch_state: BUG! unknown data state\n");
}
} else if (ns->nxstate & STATE_ADDR_MASK) {
/*
* If the next state is address input, set the internal
* register to the number of expected address bytes
*/
ns->regs.count = 0;
switch (NS_STATE(ns->nxstate)) {
case STATE_ADDR_PAGE:
ns->regs.num = ns->geom.pgaddrbytes;
break;
case STATE_ADDR_SEC:
ns->regs.num = ns->geom.secaddrbytes;
break;
case STATE_ADDR_ZERO:
ns->regs.num = 1;
break;
case STATE_ADDR_COLUMN:
/* Column address is always 2 bytes */
ns->regs.num = ns->geom.pgaddrbytes - ns->geom.secaddrbytes;
break;
default:
NS_ERR("switch_state: BUG! unknown address state\n");
}
} else {
/*
* Just reset internal counters.
*/
ns->regs.num = 0;
ns->regs.count = 0;
}
}
static u_char ns_nand_read_byte(struct nand_chip *chip)
{
struct nandsim *ns = nand_get_controller_data(chip);
u_char outb = 0x00;
/* Sanity and correctness checks */
if (!ns->lines.ce) {
NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb);
return outb;
}
if (ns->lines.ale || ns->lines.cle) {
NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb);
return outb;
}
if (!(ns->state & STATE_DATAOUT_MASK)) {
NS_WARN("read_byte: unexpected data output cycle, state is %s return %#x\n",
ns_get_state_name(ns->state), (uint)outb);
return outb;
}
/* Status register may be read as many times as it is wanted */
if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) {
NS_DBG("read_byte: return %#x status\n", ns->regs.status);
return ns->regs.status;
}
/* Check if there is any data in the internal buffer which may be read */
if (ns->regs.count == ns->regs.num) {
NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb);
return outb;
}
switch (NS_STATE(ns->state)) {
case STATE_DATAOUT:
if (ns->busw == 8) {
outb = ns->buf.byte[ns->regs.count];
ns->regs.count += 1;
} else {
outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]);
ns->regs.count += 2;
}
break;
case STATE_DATAOUT_ID:
NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num);
outb = ns->ids[ns->regs.count];
ns->regs.count += 1;
break;
default:
BUG();
}
if (ns->regs.count == ns->regs.num) {
NS_DBG("read_byte: all bytes were read\n");
if (NS_STATE(ns->nxstate) == STATE_READY)
ns_switch_state(ns);
}
return outb;
}
static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
{
struct nandsim *ns = nand_get_controller_data(chip);
/* Sanity and correctness checks */
if (!ns->lines.ce) {
NS_ERR("write_byte: chip is disabled, ignore write\n");
return;
}
if (ns->lines.ale && ns->lines.cle) {
NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n");
return;
}
if (ns->lines.cle == 1) {
/*
* The byte written is a command.
*/
if (byte == NAND_CMD_RESET) {
NS_LOG("reset chip\n");
ns_switch_to_ready_state(ns, NS_STATUS_OK(ns));
return;
}
/* Check that the command byte is correct */
if (ns_check_command(byte)) {
NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
return;
}
if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
|| NS_STATE(ns->state) == STATE_DATAOUT) {
int row = ns->regs.row;
ns_switch_state(ns);
if (byte == NAND_CMD_RNDOUT)
ns->regs.row = row;
}
/* Check if chip is expecting command */
if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
/* Do not warn if only 2 id bytes are read */
if (!(ns->regs.command == NAND_CMD_READID &&
NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
/*
* We are in situation when something else (not command)
* was expected but command was input. In this case ignore
* previous command(s)/state(s) and accept the last one.
*/
NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, ignore previous states\n",
(uint)byte,
ns_get_state_name(ns->nxstate));
}
ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
}
NS_DBG("command byte corresponding to %s state accepted\n",
ns_get_state_name(ns_get_state_by_command(byte)));
ns->regs.command = byte;
ns_switch_state(ns);
} else if (ns->lines.ale == 1) {
/*
* The byte written is an address.
*/
if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) {
NS_DBG("write_byte: operation isn't known yet, identify it\n");
if (ns_find_operation(ns, 1) < 0)
return;
if ((ns->state & ACTION_MASK) &&
ns_do_state_action(ns, ns->state) < 0) {
ns_switch_to_ready_state(ns,
NS_STATUS_FAILED(ns));
return;
}
ns->regs.count = 0;
switch (NS_STATE(ns->nxstate)) {
case STATE_ADDR_PAGE:
ns->regs.num = ns->geom.pgaddrbytes;
break;
case STATE_ADDR_SEC:
ns->regs.num = ns->geom.secaddrbytes;
break;
case STATE_ADDR_ZERO:
ns->regs.num = 1;
break;
default:
BUG();
}
}
/* Check that chip is expecting address */
if (!(ns->nxstate & STATE_ADDR_MASK)) {
NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, switch to STATE_READY\n",
(uint)byte, ns_get_state_name(ns->nxstate));
ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
/* Check if this is expected byte */
if (ns->regs.count == ns->regs.num) {
NS_ERR("write_byte: no more address bytes expected\n");
ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
ns_accept_addr_byte(ns, byte);
ns->regs.count += 1;
NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n",
(uint)byte, ns->regs.count, ns->regs.num);
if (ns->regs.count == ns->regs.num) {
NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
ns_switch_state(ns);
}
} else {
/*
* The byte written is input data.
*/
/* Check that chip is expecting data input */
if (!(ns->state & STATE_DATAIN_MASK)) {
NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, switch to %s\n",
(uint)byte, ns_get_state_name(ns->state),
ns_get_state_name(STATE_READY));
ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
/* Check if this is expected byte */
if (ns->regs.count == ns->regs.num) {
NS_WARN("write_byte: %u input bytes has already been accepted, ignore write\n",
ns->regs.num);
return;
}
if (ns->busw == 8) {
ns->buf.byte[ns->regs.count] = byte;
ns->regs.count += 1;
} else {
ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte);
ns->regs.count += 2;
}
}
return;
}
static void ns_nand_write_buf(struct nand_chip *chip, const u_char *buf,
int len)
{
struct nandsim *ns = nand_get_controller_data(chip);
/* Check that chip is expecting data input */
if (!(ns->state & STATE_DATAIN_MASK)) {
NS_ERR("write_buf: data input isn't expected, state is %s, switch to STATE_READY\n",
ns_get_state_name(ns->state));
ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
/* Check if these are expected bytes */
if (ns->regs.count + len > ns->regs.num) {
NS_ERR("write_buf: too many input bytes\n");
ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
memcpy(ns->buf.byte + ns->regs.count, buf, len);
ns->regs.count += len;
if (ns->regs.count == ns->regs.num) {
NS_DBG("write_buf: %d bytes were written\n", ns->regs.count);
}
}
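/*
* Emulate a multi-byte data-output cycle (read_buf): sanity-check the control
* lines and state, fall back to byte-wise reads for non-page data (ID, status,
* etc.), then copy the requested bytes from the internal buffer.
*/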
static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
{
struct nandsim *ns = nand_get_controller_data(chip);
/* Sanity and correctness checks */
if (!ns->lines.ce) {
NS_ERR("read_buf: chip is disabled\n");
return;
}
if (ns->lines.ale || ns->lines.cle) {
NS_ERR("read_buf: ALE or CLE pin is high\n");
return;
}
if (!(ns->state & STATE_DATAOUT_MASK)) {
NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
ns_get_state_name(ns->state));
return;
}
if (NS_STATE(ns->state) != STATE_DATAOUT) {
int i;
for (i = 0; i < len; i++)
buf[i] = ns_nand_read_byte(chip);
return;
}
/* Check if these are expected bytes */
if (ns->regs.count + len > ns->regs.num) {
NS_ERR("read_buf: too many bytes to read\n");
ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
memcpy(buf, ns->buf.byte + ns->regs.count, len);
ns->regs.count += len;
if (ns->regs.count == ns->regs.num) {
if (NS_STATE(ns->nxstate) == STATE_READY)
ns_switch_state(ns);
}
return;
}
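/*
* ->exec_op() hook: translate generic NAND instructions into the simulator's
* CLE/ALE/data-cycle model. In check_only mode, sequential cached (continuous)
* read operations are refused because the simulator cannot model them.
*/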
static int ns_exec_op(struct nand_chip *chip, const struct nand_operation *op,
bool check_only)
{
int i;
unsigned int op_id;
const struct nand_op_instr *instr = NULL;
struct nandsim *ns = nand_get_controller_data(chip);
if (check_only) {
/* The current implementation of nandsim needs to know the
* ongoing operation when performing the address cycles. This
* means it cannot tell the difference between a regular read
* and a continuous read. Hence, this hack to manually refuse
* supporting sequential cached operations.
*/
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];
if (instr->type == NAND_OP_CMD_INSTR &&
(instr->ctx.cmd.opcode == NAND_CMD_READCACHEEND ||
instr->ctx.cmd.opcode == NAND_CMD_READCACHESEQ))
return -EOPNOTSUPP;
}
return 0;
}
ns->lines.ce = 1;
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];
ns->lines.cle = 0;
ns->lines.ale = 0;
switch (instr->type) {
case NAND_OP_CMD_INSTR:
ns->lines.cle = 1;
ns_nand_write_byte(chip, instr->ctx.cmd.opcode);
break;
case NAND_OP_ADDR_INSTR:
ns->lines.ale = 1;
for (i = 0; i < instr->ctx.addr.naddrs; i++)
ns_nand_write_byte(chip, instr->ctx.addr.addrs[i]);
break;
case NAND_OP_DATA_IN_INSTR:
ns_nand_read_buf(chip, instr->ctx.data.buf.in, instr->ctx.data.len);
break;
case NAND_OP_DATA_OUT_INSTR:
ns_nand_write_buf(chip, instr->ctx.data.buf.out, instr->ctx.data.len);
break;
case NAND_OP_WAITRDY_INSTR:
/* we are always ready */
break;
}
}
return 0;
}
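/*
* ->attach_chip() hook: select software ECC (Hamming by default, BCH when the
* 'bch' module parameter is set) and verify that the requested BCH strength
* fits in the OOB area of the simulated device.
*/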
static int ns_attach_chip(struct nand_chip *chip)
{
unsigned int eccsteps, eccbytes;
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
chip->ecc.algo = bch ? NAND_ECC_ALGO_BCH : NAND_ECC_ALGO_HAMMING;
if (!bch)
return 0;
if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
NS_ERR("BCH ECC support is disabled\n");
return -EINVAL;
}
/* Use 512-byte ecc blocks */
eccsteps = nsmtd->writesize / 512;
eccbytes = ((bch * 13) + 7) / 8;
/* Do not bother supporting small page devices */
if (nsmtd->oobsize < 64 || !eccsteps) {
NS_ERR("BCH not available on small page devices\n");
return -EINVAL;
}
if (((eccbytes * eccsteps) + 2) > nsmtd->oobsize) {
NS_ERR("Invalid BCH value %u\n", bch);
return -EINVAL;
}
chip->ecc.size = 512;
chip->ecc.strength = bch;
chip->ecc.bytes = eccbytes;
NS_INFO("Using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
return 0;
}
static const struct nand_controller_ops ns_controller_ops = {
.attach_chip = ns_attach_chip,
.exec_op = ns_exec_op,
};
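/*
* Illustrative usage only (assuming the variables referenced below, such as
* bus_width, bch, bbt and overridesize, are exposed as module parameters,
* which is defined earlier in this file):
*
* modprobe nandsim bus_width=8 bch=4 bbt=1 overridesize=2
*/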
/*
* Module initialization function
*/
static int __init ns_init_module(void)
{
struct list_head *pos, *n;
struct nand_chip *chip;
struct nandsim *ns;
int ret;
if (bus_width != 8 && bus_width != 16) {
NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
return -EINVAL;
}
ns = kzalloc(sizeof(struct nandsim), GFP_KERNEL);
if (!ns) {
NS_ERR("unable to allocate core structures.\n");
return -ENOMEM;
}
chip = &ns->chip;
nsmtd = nand_to_mtd(chip);
nand_set_controller_data(chip, (void *)ns);
/* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
/* and 'badblocks' parameters to work */
chip->options |= NAND_SKIP_BBTSCAN;
switch (bbt) {
case 2:
chip->bbt_options |= NAND_BBT_NO_OOB;
fallthrough;
case 1:
chip->bbt_options |= NAND_BBT_USE_FLASH;
fallthrough;
case 0:
break;
default:
NS_ERR("bbt has to be 0..2\n");
ret = -EINVAL;
goto free_ns_struct;
}
/*
* Perform minimum nandsim structure initialization to handle
* the initial ID read command correctly
*/
if (id_bytes[6] != 0xFF || id_bytes[7] != 0xFF)
ns->geom.idbytes = 8;
else if (id_bytes[4] != 0xFF || id_bytes[5] != 0xFF)
ns->geom.idbytes = 6;
else if (id_bytes[2] != 0xFF || id_bytes[3] != 0xFF)
ns->geom.idbytes = 4;
else
ns->geom.idbytes = 2;
ns->regs.status = NS_STATUS_OK(ns);
ns->nxstate = STATE_UNKNOWN;
ns->options |= OPT_PAGE512; /* temporary value */
memcpy(ns->ids, id_bytes, sizeof(ns->ids));
if (bus_width == 16) {
ns->busw = 16;
chip->options |= NAND_BUSWIDTH_16;
}
nsmtd->owner = THIS_MODULE;
ret = ns_parse_weakblocks();
if (ret)
goto free_ns_struct;
ret = ns_parse_weakpages();
if (ret)
goto free_wb_list;
ret = ns_parse_gravepages();
if (ret)
goto free_wp_list;
nand_controller_init(&ns->base);
ns->base.ops = &ns_controller_ops;
chip->controller = &ns->base;
ret = nand_scan(chip, 1);
if (ret) {
NS_ERR("Could not scan NAND Simulator device\n");
goto free_gp_list;
}
if (overridesize) {
uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
struct nand_memory_organization *memorg;
u64 targetsize;
memorg = nanddev_get_memorg(&chip->base);
if (new_size >> overridesize != nsmtd->erasesize) {
NS_ERR("overridesize is too big\n");
ret = -EINVAL;
goto cleanup_nand;
}
/* N.B. This relies on nand_scan not doing anything with the size before we change it */
nsmtd->size = new_size;
memorg->eraseblocks_per_lun = 1 << overridesize;
targetsize = nanddev_target_size(&chip->base);
chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
chip->pagemask = (targetsize >> chip->page_shift) - 1;
}
ret = ns_setup_wear_reporting(nsmtd);
if (ret)
goto cleanup_nand;
ret = ns_init(nsmtd);
if (ret)
goto free_ebw;
ret = nand_create_bbt(chip);
if (ret)
goto free_ns_object;
ret = ns_parse_badblocks(ns, nsmtd);
if (ret)
goto free_ns_object;
/* Register NAND partitions */
ret = mtd_device_register(nsmtd, &ns->partitions[0], ns->nbparts);
if (ret)
goto free_ns_object;
ret = ns_debugfs_create(ns);
if (ret)
goto unregister_mtd;
return 0;
unregister_mtd:
WARN_ON(mtd_device_unregister(nsmtd));
free_ns_object:
ns_free(ns);
free_ebw:
kfree(erase_block_wear);
cleanup_nand:
nand_cleanup(chip);
free_gp_list:
list_for_each_safe(pos, n, &grave_pages) {
list_del(pos);
kfree(list_entry(pos, struct grave_page, list));
}
free_wp_list:
list_for_each_safe(pos, n, &weak_pages) {
list_del(pos);
kfree(list_entry(pos, struct weak_page, list));
}
free_wb_list:
list_for_each_safe(pos, n, &weak_blocks) {
list_del(pos);
kfree(list_entry(pos, struct weak_block, list));
}
free_ns_struct:
kfree(ns);
return ret;
}
module_init(ns_init_module);
/*
* Module clean-up function
*/
static void __exit ns_cleanup_module(void)
{
struct nand_chip *chip = mtd_to_nand(nsmtd);
struct nandsim *ns = nand_get_controller_data(chip);
struct list_head *pos, *n;
ns_debugfs_remove(ns);
WARN_ON(mtd_device_unregister(nsmtd));
ns_free(ns);
kfree(erase_block_wear);
nand_cleanup(chip);
list_for_each_safe(pos, n, &grave_pages) {
list_del(pos);
kfree(list_entry(pos, struct grave_page, list));
}
list_for_each_safe(pos, n, &weak_pages) {
list_del(pos);
kfree(list_entry(pos, struct weak_page, list));
}
list_for_each_safe(pos, n, &weak_blocks) {
list_del(pos);
kfree(list_entry(pos, struct weak_block, list));
}
kfree(ns);
}
module_exit(ns_cleanup_module);
MODULE_LICENSE ("GPL");
MODULE_AUTHOR ("Artem B. Bityuckiy");
MODULE_DESCRIPTION ("The NAND flash simulator");
| linux-master | drivers/mtd/nand/raw/nandsim.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom_adm.h>
#include <linux/dma/qcom_bam_dma.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
#define NAND_ADDR0 0x04
#define NAND_ADDR1 0x08
#define NAND_FLASH_CHIP_SELECT 0x0c
#define NAND_EXEC_CMD 0x10
#define NAND_FLASH_STATUS 0x14
#define NAND_BUFFER_STATUS 0x18
#define NAND_DEV0_CFG0 0x20
#define NAND_DEV0_CFG1 0x24
#define NAND_DEV0_ECC_CFG 0x28
#define NAND_AUTO_STATUS_EN 0x2c
#define NAND_DEV1_CFG0 0x30
#define NAND_DEV1_CFG1 0x34
#define NAND_READ_ID 0x40
#define NAND_READ_STATUS 0x44
#define NAND_DEV_CMD0 0xa0
#define NAND_DEV_CMD1 0xa4
#define NAND_DEV_CMD2 0xa8
#define NAND_DEV_CMD_VLD 0xac
#define SFLASHC_BURST_CFG 0xe0
#define NAND_ERASED_CW_DETECT_CFG 0xe8
#define NAND_ERASED_CW_DETECT_STATUS 0xec
#define NAND_EBI2_ECC_BUF_CFG 0xf0
#define FLASH_BUF_ACC 0x100
#define NAND_CTRL 0xf00
#define NAND_VERSION 0xf08
#define NAND_READ_LOCATION_0 0xf20
#define NAND_READ_LOCATION_1 0xf24
#define NAND_READ_LOCATION_2 0xf28
#define NAND_READ_LOCATION_3 0xf2c
#define NAND_READ_LOCATION_LAST_CW_0 0xf40
#define NAND_READ_LOCATION_LAST_CW_1 0xf44
#define NAND_READ_LOCATION_LAST_CW_2 0xf48
#define NAND_READ_LOCATION_LAST_CW_3 0xf4c
/* dummy register offsets, used by write_reg_dma */
#define NAND_DEV_CMD1_RESTORE 0xdead
#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
/* NAND_FLASH_CMD bits */
#define PAGE_ACC BIT(4)
#define LAST_PAGE BIT(5)
/* NAND_FLASH_CHIP_SELECT bits */
#define NAND_DEV_SEL 0
#define DM_EN BIT(2)
/* NAND_FLASH_STATUS bits */
#define FS_OP_ERR BIT(4)
#define FS_READY_BSY_N BIT(5)
#define FS_MPU_ERR BIT(8)
#define FS_DEVICE_STS_ERR BIT(16)
#define FS_DEVICE_WP BIT(23)
/* NAND_BUFFER_STATUS bits */
#define BS_UNCORRECTABLE_BIT BIT(8)
#define BS_CORRECTABLE_ERR_MSK 0x1f
/* NAND_DEVn_CFG0 bits */
#define DISABLE_STATUS_AFTER_WRITE 4
#define CW_PER_PAGE 6
#define UD_SIZE_BYTES 9
#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
#define ECC_PARITY_SIZE_BYTES_RS 19
#define SPARE_SIZE_BYTES 23
#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
#define NUM_ADDR_CYCLES 27
#define STATUS_BFR_READ 30
#define SET_RD_MODE_AFTER_STATUS 31
/* NAND_DEVn_CFG1 bits */
#define DEV0_CFG1_ECC_DISABLE 0
#define WIDE_FLASH 1
#define NAND_RECOVERY_CYCLES 2
#define CS_ACTIVE_BSY 5
#define BAD_BLOCK_BYTE_NUM 6
#define BAD_BLOCK_IN_SPARE_AREA 16
#define WR_RD_BSY_GAP 17
#define ENABLE_BCH_ECC 27
/* NAND_DEV0_ECC_CFG bits */
#define ECC_CFG_ECC_DISABLE 0
#define ECC_SW_RESET 1
#define ECC_MODE 4
#define ECC_PARITY_SIZE_BYTES_BCH 8
#define ECC_NUM_DATA_BYTES 16
#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
#define ECC_FORCE_CLK_OPEN 30
/* NAND_DEV_CMD1 bits */
#define READ_ADDR 0
/* NAND_DEV_CMD_VLD bits */
#define READ_START_VLD BIT(0)
#define READ_STOP_VLD BIT(1)
#define WRITE_START_VLD BIT(2)
#define ERASE_START_VLD BIT(3)
#define SEQ_READ_START_VLD BIT(4)
/* NAND_EBI2_ECC_BUF_CFG bits */
#define NUM_STEPS 0
/* NAND_ERASED_CW_DETECT_CFG bits */
#define ERASED_CW_ECC_MASK 1
#define AUTO_DETECT_RES 0
#define MASK_ECC BIT(ERASED_CW_ECC_MASK)
#define RESET_ERASED_DET BIT(AUTO_DETECT_RES)
#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
/* NAND_ERASED_CW_DETECT_STATUS bits */
#define PAGE_ALL_ERASED BIT(7)
#define CODEWORD_ALL_ERASED BIT(6)
#define PAGE_ERASED BIT(5)
#define CODEWORD_ERASED BIT(4)
#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
/* NAND_READ_LOCATION_n bits */
#define READ_LOCATION_OFFSET 0
#define READ_LOCATION_SIZE 16
#define READ_LOCATION_LAST 31
/* Version Mask */
#define NAND_VERSION_MAJOR_MASK 0xf0000000
#define NAND_VERSION_MAJOR_SHIFT 28
#define NAND_VERSION_MINOR_MASK 0x0fff0000
#define NAND_VERSION_MINOR_SHIFT 16
/* NAND OP_CMDs */
#define OP_PAGE_READ 0x2
#define OP_PAGE_READ_WITH_ECC 0x3
#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
#define OP_PAGE_READ_ONFI_READ 0x5
#define OP_PROGRAM_PAGE 0x6
#define OP_PAGE_PROGRAM_WITH_ECC 0x7
#define OP_PROGRAM_PAGE_SPARE 0x9
#define OP_BLOCK_ERASE 0xa
#define OP_CHECK_STATUS 0xc
#define OP_FETCH_ID 0xb
#define OP_RESET_DEVICE 0xd
/* Default Value for NAND_DEV_CMD_VLD */
#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
ERASE_START_VLD | SEQ_READ_START_VLD)
/* NAND_CTRL bits */
#define BAM_MODE_EN BIT(0)
/*
* the NAND controller performs reads/writes with ECC in 516 byte chunks.
* the driver calls the chunks 'step' or 'codeword' interchangeably
*/
#define NANDC_STEP_SIZE 512
/*
* the largest page size we support is 8K, this will have 16 steps/codewords
* of 512 bytes each
*/
#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
/* we read at most 3 registers per codeword scan */
#define MAX_REG_RD (3 * MAX_NUM_STEPS)
/* ECC modes supported by the controller */
#define ECC_NONE BIT(0)
#define ECC_RS_4BIT BIT(1)
#define ECC_BCH_4BIT BIT(2)
#define ECC_BCH_8BIT BIT(3)
#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc) \
nandc_set_reg(chip, reg, \
((cw_offset) << READ_LOCATION_OFFSET) | \
((read_size) << READ_LOCATION_SIZE) | \
((is_last_read_loc) << READ_LOCATION_LAST))
#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc) \
nandc_set_reg(chip, reg, \
((cw_offset) << READ_LOCATION_OFFSET) | \
((read_size) << READ_LOCATION_SIZE) | \
((is_last_read_loc) << READ_LOCATION_LAST))
/*
* Returns the actual register address for all NAND_DEV_ registers
* (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
*/
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
/* Returns the NAND register physical address */
#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
((chip)->reg_read_dma + \
((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
#define QPIC_PER_CW_CMD_ELEMENTS 32
#define QPIC_PER_CW_CMD_SGL 32
#define QPIC_PER_CW_DATA_SGL 8
#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
/*
* Flags used in DMA descriptor preparation helper functions
* (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
*/
/* Don't set the EOT in current tx BAM sgl */
#define NAND_BAM_NO_EOT BIT(0)
/* Set the NWD flag in current BAM sgl */
#define NAND_BAM_NWD BIT(1)
/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
#define NAND_BAM_NEXT_SGL BIT(2)
/*
* The erased codeword status is used twice in a single transfer, so this
* flag determines the current value of the erased codeword status register
*/
#define NAND_ERASED_CW_SET BIT(4)
#define MAX_ADDRESS_CYCLE 5
/*
* This data type corresponds to the BAM transaction which will be used for all
* NAND transfers.
* @bam_ce - the array of BAM command elements
* @cmd_sgl - sgl for NAND BAM command pipe
* @data_sgl - sgl for NAND BAM consumer/producer pipe
* @last_data_desc - last DMA desc in data channel (tx/rx).
* @last_cmd_desc - last DMA desc in command channel.
* @txn_done - completion for NAND transfer.
* @bam_ce_pos - the index in bam_ce which is available for next sgl
* @bam_ce_start - the index in bam_ce which marks the start of the command
* elements for the current sgl. It will be used for size
* calculation of the current sgl
* @cmd_sgl_pos - current index in command sgl.
* @cmd_sgl_start - start index in command sgl.
* @tx_sgl_pos - current index in data sgl for tx.
* @tx_sgl_start - start index in data sgl for tx.
* @rx_sgl_pos - current index in data sgl for rx.
* @rx_sgl_start - start index in data sgl for rx.
* @wait_second_completion - wait for second DMA desc completion before making
* the NAND transfer completion.
*/
struct bam_transaction {
struct bam_cmd_element *bam_ce;
struct scatterlist *cmd_sgl;
struct scatterlist *data_sgl;
struct dma_async_tx_descriptor *last_data_desc;
struct dma_async_tx_descriptor *last_cmd_desc;
struct completion txn_done;
u32 bam_ce_pos;
u32 bam_ce_start;
u32 cmd_sgl_pos;
u32 cmd_sgl_start;
u32 tx_sgl_pos;
u32 tx_sgl_start;
u32 rx_sgl_pos;
u32 rx_sgl_start;
bool wait_second_completion;
};
/*
* This data type corresponds to the nand dma descriptor
* @dma_desc - low level DMA engine descriptor
* @list - list for desc_info
*
* @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
* ADM
* @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
* @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
* @dir - DMA transfer direction
*/
struct desc_info {
struct dma_async_tx_descriptor *dma_desc;
struct list_head node;
union {
struct scatterlist adm_sgl;
struct {
struct scatterlist *bam_sgl;
int sgl_cnt;
};
};
enum dma_data_direction dir;
};
/*
* holds the current register values that we want to write. acts as a contiguous
* chunk of memory which we use to write the controller registers through DMA.
*/
struct nandc_regs {
__le32 cmd;
__le32 addr0;
__le32 addr1;
__le32 chip_sel;
__le32 exec;
__le32 cfg0;
__le32 cfg1;
__le32 ecc_bch_cfg;
__le32 clrflashstatus;
__le32 clrreadstatus;
__le32 cmd1;
__le32 vld;
__le32 orig_cmd1;
__le32 orig_vld;
__le32 ecc_buf_cfg;
__le32 read_location0;
__le32 read_location1;
__le32 read_location2;
__le32 read_location3;
__le32 read_location_last0;
__le32 read_location_last1;
__le32 read_location_last2;
__le32 read_location_last3;
__le32 erased_cw_detect_cfg_clr;
__le32 erased_cw_detect_cfg_set;
};
/*
* NAND controller data struct
*
* @dev: parent device
*
* @base: MMIO base
*
* @core_clk: controller clock
* @aon_clk: another controller clock
*
* @regs: a contiguous chunk of memory for DMA register
* writes. contains the register values to be
* written to controller
*
* @props: properties of current NAND controller,
* initialized via DT match data
*
* @controller: base controller structure
* @host_list: list containing all the chips attached to the
* controller
*
* @chan: dma channel
* @cmd_crci: ADM DMA CRCI for command flow control
* @data_crci: ADM DMA CRCI for data flow control
*
* @desc_list: DMA descriptor list (list of desc_infos)
*
* @data_buffer: our local DMA buffer for page read/writes,
* used when we can't use the buffer provided
* by upper layers directly
* @reg_read_buf: local buffer for reading back registers via DMA
*
* @base_phys: physical base address of controller registers
* @base_dma: dma base address of controller registers
* @reg_read_dma: contains dma address for register read buffer
*
* @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
* functions
* @max_cwperpage: maximum QPIC codewords required. calculated
* from the page sizes of all connected NAND devices
*
* @reg_read_pos: marker for data read in reg_read_buf
*
* @cmd1/vld: some fixed controller register values
*
* @exec_opwrite: flag to select correct number of code word
* while reading status
*/
struct qcom_nand_controller {
struct device *dev;
void __iomem *base;
struct clk *core_clk;
struct clk *aon_clk;
struct nandc_regs *regs;
struct bam_transaction *bam_txn;
const struct qcom_nandc_props *props;
struct nand_controller controller;
struct list_head host_list;
union {
/* will be used only by QPIC for BAM DMA */
struct {
struct dma_chan *tx_chan;
struct dma_chan *rx_chan;
struct dma_chan *cmd_chan;
};
/* will be used only by EBI2 for ADM DMA */
struct {
struct dma_chan *chan;
unsigned int cmd_crci;
unsigned int data_crci;
};
};
struct list_head desc_list;
u8 *data_buffer;
__le32 *reg_read_buf;
phys_addr_t base_phys;
dma_addr_t base_dma;
dma_addr_t reg_read_dma;
int buf_size;
int buf_count;
int buf_start;
unsigned int max_cwperpage;
int reg_read_pos;
u32 cmd1, vld;
bool exec_opwrite;
};
/*
* NAND special boot partitions
*
* @page_offset: offset of the partition where spare data is not protected
* by ECC (value in pages)
* @page_size: size of the partition where spare data is not protected
* by ECC (value in pages)
*/
struct qcom_nand_boot_partition {
u32 page_offset;
u32 page_size;
};
/*
* Qcom op for each exec_op transfer
*
* @data_instr: data instruction pointer
* @data_instr_idx: data instruction index
* @rdy_timeout_ms: wait ready timeout in ms
* @rdy_delay_ns: Additional delay in ns
* @addr1_reg: Address1 register value
* @addr2_reg: Address2 register value
* @cmd_reg: CMD register value
* @flag: flag for misc instruction
*/
struct qcom_op {
const struct nand_op_instr *data_instr;
unsigned int data_instr_idx;
unsigned int rdy_timeout_ms;
unsigned int rdy_delay_ns;
u32 addr1_reg;
u32 addr2_reg;
u32 cmd_reg;
u8 flag;
};
/*
* NAND chip structure
*
* @boot_partitions: array of boot partitions where offset and size of the
* boot partitions are stored
*
* @chip: base NAND chip structure
* @node: list node to add itself to host_list in
* qcom_nand_controller
*
* @nr_boot_partitions: count of the boot partitions where spare data is not
* protected by ECC
*
* @cs: chip select value for this chip
* @cw_size: the number of bytes in a single step/codeword
* of a page, consisting of all data, ecc, spare
* and reserved bytes
* @cw_data: the number of bytes within a codeword protected
* by ECC
* @ecc_bytes_hw: ECC bytes used by controller hardware for this
* chip
*
* @last_command: keeps track of last command on this chip. used
* for reading correct status
*
* @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
* ecc/non-ecc mode for the current nand flash
* device
*
* @status: value to be returned if NAND_CMD_STATUS command
* is executed
* @codeword_fixup: keep track of the current layout used by
* the driver for read/write operation.
* @use_ecc: request the controller to use ECC for the
* upcoming read/write
* @bch_enabled: flag to tell whether BCH ECC mode is used
*/
struct qcom_nand_host {
struct qcom_nand_boot_partition *boot_partitions;
struct nand_chip chip;
struct list_head node;
int nr_boot_partitions;
int cs;
int cw_size;
int cw_data;
int ecc_bytes_hw;
int spare_bytes;
int bbm_size;
int last_command;
u32 cfg0, cfg1;
u32 cfg0_raw, cfg1_raw;
u32 ecc_buf_cfg;
u32 ecc_bch_cfg;
u32 clrflashstatus;
u32 clrreadstatus;
u8 status;
bool codeword_fixup;
bool use_ecc;
bool bch_enabled;
};
/*
* This data type corresponds to the NAND controller properties, which vary
* among different NAND controllers.
* @ecc_modes - ecc mode for NAND
* @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
* @is_bam - whether NAND controller is using BAM
* @is_qpic - whether NAND CTRL is part of qpic IP
* @qpic_v2 - flag to indicate QPIC IP version 2
* @use_codeword_fixup - whether NAND has different layout for boot partitions
*/
struct qcom_nandc_props {
u32 ecc_modes;
u32 dev_cmd_reg_start;
bool is_bam;
bool is_qpic;
bool qpic_v2;
bool use_codeword_fixup;
};
/* Frees the BAM transaction memory */
static void free_bam_transaction(struct qcom_nand_controller *nandc)
{
struct bam_transaction *bam_txn = nandc->bam_txn;
devm_kfree(nandc->dev, bam_txn);
}
/* Allocates and Initializes the BAM transaction */
static struct bam_transaction *
alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
struct bam_transaction *bam_txn;
size_t bam_txn_size;
unsigned int num_cw = nandc->max_cwperpage;
void *bam_txn_buf;
bam_txn_size =
sizeof(*bam_txn) + num_cw *
((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
if (!bam_txn_buf)
return NULL;
bam_txn = bam_txn_buf;
bam_txn_buf += sizeof(*bam_txn);
bam_txn->bam_ce = bam_txn_buf;
bam_txn_buf +=
sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
bam_txn->cmd_sgl = bam_txn_buf;
bam_txn_buf +=
sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
bam_txn->data_sgl = bam_txn_buf;
init_completion(&bam_txn->txn_done);
return bam_txn;
}
/* Clears the BAM transaction indexes */
static void clear_bam_transaction(struct qcom_nand_controller *nandc)
{
struct bam_transaction *bam_txn = nandc->bam_txn;
if (!nandc->props->is_bam)
return;
bam_txn->bam_ce_pos = 0;
bam_txn->bam_ce_start = 0;
bam_txn->cmd_sgl_pos = 0;
bam_txn->cmd_sgl_start = 0;
bam_txn->tx_sgl_pos = 0;
bam_txn->tx_sgl_start = 0;
bam_txn->rx_sgl_pos = 0;
bam_txn->rx_sgl_start = 0;
bam_txn->last_data_desc = NULL;
bam_txn->wait_second_completion = false;
sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
QPIC_PER_CW_CMD_SGL);
sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
QPIC_PER_CW_DATA_SGL);
reinit_completion(&bam_txn->txn_done);
}
/* Callback for DMA descriptor completion */
static void qpic_bam_dma_done(void *data)
{
struct bam_transaction *bam_txn = data;
/*
* In case of data transfer with NAND, 2 callbacks will be generated.
* One for command channel and another one for data channel.
* If current transaction has data descriptors
* (i.e. wait_second_completion is true), then set this to false
* and wait for second DMA descriptor completion.
*/
if (bam_txn->wait_second_completion)
bam_txn->wait_second_completion = false;
else
complete(&bam_txn->txn_done);
}
static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
return container_of(chip, struct qcom_nand_host, chip);
}
static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
return container_of(chip->controller, struct qcom_nand_controller,
controller);
}
static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
return ioread32(nandc->base + offset);
}
static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
u32 val)
{
iowrite32(val, nandc->base + offset);
}
static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
bool is_cpu)
{
if (!nandc->props->is_bam)
return;
if (is_cpu)
dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
MAX_REG_RD *
sizeof(*nandc->reg_read_buf),
DMA_FROM_DEVICE);
else
dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
MAX_REG_RD *
sizeof(*nandc->reg_read_buf),
DMA_FROM_DEVICE);
}
static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
switch (offset) {
case NAND_FLASH_CMD:
return &regs->cmd;
case NAND_ADDR0:
return &regs->addr0;
case NAND_ADDR1:
return &regs->addr1;
case NAND_FLASH_CHIP_SELECT:
return &regs->chip_sel;
case NAND_EXEC_CMD:
return &regs->exec;
case NAND_FLASH_STATUS:
return &regs->clrflashstatus;
case NAND_DEV0_CFG0:
return &regs->cfg0;
case NAND_DEV0_CFG1:
return &regs->cfg1;
case NAND_DEV0_ECC_CFG:
return &regs->ecc_bch_cfg;
case NAND_READ_STATUS:
return &regs->clrreadstatus;
case NAND_DEV_CMD1:
return &regs->cmd1;
case NAND_DEV_CMD1_RESTORE:
return &regs->orig_cmd1;
case NAND_DEV_CMD_VLD:
return &regs->vld;
case NAND_DEV_CMD_VLD_RESTORE:
return &regs->orig_vld;
case NAND_EBI2_ECC_BUF_CFG:
return &regs->ecc_buf_cfg;
case NAND_READ_LOCATION_0:
return &regs->read_location0;
case NAND_READ_LOCATION_1:
return &regs->read_location1;
case NAND_READ_LOCATION_2:
return &regs->read_location2;
case NAND_READ_LOCATION_3:
return &regs->read_location3;
case NAND_READ_LOCATION_LAST_CW_0:
return &regs->read_location_last0;
case NAND_READ_LOCATION_LAST_CW_1:
return &regs->read_location_last1;
case NAND_READ_LOCATION_LAST_CW_2:
return &regs->read_location_last2;
case NAND_READ_LOCATION_LAST_CW_3:
return &regs->read_location_last3;
default:
return NULL;
}
}
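/*
* Stash a register value in the DMA-able shadow copy of the controller
* registers (struct nandc_regs); it is written to the hardware later
* through write_reg_dma().
*/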
static void nandc_set_reg(struct nand_chip *chip, int offset,
u32 val)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nandc_regs *regs = nandc->regs;
__le32 *reg;
reg = offset_to_nandc_reg(regs, offset);
if (reg)
*reg = cpu_to_le32(val);
}
/* Helper to check the code word, whether it is last cw or not */
static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
{
return cw == (ecc->steps - 1);
}
/* helper to configure location register values */
static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
int cw_offset, int read_size, int is_last_read_loc)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int reg_base = NAND_READ_LOCATION_0;
if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
reg_base = NAND_READ_LOCATION_LAST_CW_0;
reg_base += reg * 4;
if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
return nandc_set_read_loc_last(chip, reg_base, cw_offset,
read_size, is_last_read_loc);
else
return nandc_set_read_loc_first(chip, reg_base, cw_offset,
read_size, is_last_read_loc);
}
/* helper to configure address register values */
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
struct nand_chip *chip = &host->chip;
if (chip->options & NAND_BUSWIDTH_16)
column >>= 1;
nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
}
/*
* update_rw_regs: set up read/write register values, these will be
* written to the NAND controller registers via DMA
*
* @num_cw: number of steps for the read/write operation
* @read: read or write operation
* @cw : which code word
*/
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
{
struct nand_chip *chip = &host->chip;
u32 cmd, cfg0, cfg1, ecc_bch_cfg;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
if (read) {
if (host->use_ecc)
cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
else
cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
} else {
cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
}
if (host->use_ecc) {
cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
(num_cw - 1) << CW_PER_PAGE;
cfg1 = host->cfg1;
ecc_bch_cfg = host->ecc_bch_cfg;
} else {
cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
(num_cw - 1) << CW_PER_PAGE;
cfg1 = host->cfg1_raw;
ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
}
nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
if (!nandc->props->qpic_v2)
nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
nandc_set_reg(chip, NAND_EXEC_CMD, 1);
if (read)
nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
host->cw_data : host->cw_size, 1);
}
/*
* Maps the scatter gather list for DMA transfer and forms the DMA descriptor
* for BAM. This descriptor will be added in the NAND DMA descriptor queue
* which will be submitted to DMA engine.
*/
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
struct dma_chan *chan,
unsigned long flags)
{
struct desc_info *desc;
struct scatterlist *sgl;
unsigned int sgl_cnt;
int ret;
struct bam_transaction *bam_txn = nandc->bam_txn;
enum dma_transfer_direction dir_eng;
struct dma_async_tx_descriptor *dma_desc;
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
if (chan == nandc->cmd_chan) {
sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
dir_eng = DMA_MEM_TO_DEV;
desc->dir = DMA_TO_DEVICE;
} else if (chan == nandc->tx_chan) {
sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
dir_eng = DMA_MEM_TO_DEV;
desc->dir = DMA_TO_DEVICE;
} else {
sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
dir_eng = DMA_DEV_TO_MEM;
desc->dir = DMA_FROM_DEVICE;
}
sg_mark_end(sgl + sgl_cnt - 1);
ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
if (ret == 0) {
dev_err(nandc->dev, "failure in mapping desc\n");
kfree(desc);
return -ENOMEM;
}
desc->sgl_cnt = sgl_cnt;
desc->bam_sgl = sgl;
dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
flags);
if (!dma_desc) {
dev_err(nandc->dev, "failure in prep desc\n");
dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
kfree(desc);
return -EINVAL;
}
desc->dma_desc = dma_desc;
/* update last data/command descriptor */
if (chan == nandc->cmd_chan)
bam_txn->last_cmd_desc = dma_desc;
else
bam_txn->last_data_desc = dma_desc;
list_add_tail(&desc->node, &nandc->desc_list);
return 0;
}
/*
* Prepares the command descriptor for BAM DMA which will be used for NAND
* register reads and writes. The command descriptor requires the command
* to be formed in command element type so this function uses the command
* element from bam transaction ce array and fills the same with required
* data. A single SGL can contain multiple command elements so
* NAND_BAM_NEXT_SGL will be used for starting the separate SGL
* after the current command element.
*/
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
int reg_off, const void *vaddr,
int size, unsigned int flags)
{
int bam_ce_size;
int i, ret;
struct bam_cmd_element *bam_ce_buffer;
struct bam_transaction *bam_txn = nandc->bam_txn;
bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
/* fill the command desc */
for (i = 0; i < size; i++) {
if (read)
bam_prep_ce(&bam_ce_buffer[i],
nandc_reg_phys(nandc, reg_off + 4 * i),
BAM_READ_COMMAND,
reg_buf_dma_addr(nandc,
(__le32 *)vaddr + i));
else
bam_prep_ce_le32(&bam_ce_buffer[i],
nandc_reg_phys(nandc, reg_off + 4 * i),
BAM_WRITE_COMMAND,
*((__le32 *)vaddr + i));
}
bam_txn->bam_ce_pos += size;
/* use the separate sgl after this command */
if (flags & NAND_BAM_NEXT_SGL) {
bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
bam_ce_size = (bam_txn->bam_ce_pos -
bam_txn->bam_ce_start) *
sizeof(struct bam_cmd_element);
sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
bam_ce_buffer, bam_ce_size);
bam_txn->cmd_sgl_pos++;
bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
if (flags & NAND_BAM_NWD) {
ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
DMA_PREP_FENCE |
DMA_PREP_CMD);
if (ret)
return ret;
}
}
return 0;
}
/*
* Prepares the data descriptor for BAM DMA which will be used for NAND
* data reads and writes.
*/
static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
const void *vaddr,
int size, unsigned int flags)
{
int ret;
struct bam_transaction *bam_txn = nandc->bam_txn;
if (read) {
sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
vaddr, size);
bam_txn->rx_sgl_pos++;
} else {
sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
vaddr, size);
bam_txn->tx_sgl_pos++;
/*
* BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
* is not set, form the DMA descriptor
*/
if (!(flags & NAND_BAM_NO_EOT)) {
ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
DMA_PREP_INTERRUPT);
if (ret)
return ret;
}
}
return 0;
}
static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
int reg_off, const void *vaddr, int size,
bool flow_control)
{
struct desc_info *desc;
struct dma_async_tx_descriptor *dma_desc;
struct scatterlist *sgl;
struct dma_slave_config slave_conf;
struct qcom_adm_peripheral_config periph_conf = {};
enum dma_transfer_direction dir_eng;
int ret;
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
sgl = &desc->adm_sgl;
sg_init_one(sgl, vaddr, size);
if (read) {
dir_eng = DMA_DEV_TO_MEM;
desc->dir = DMA_FROM_DEVICE;
} else {
dir_eng = DMA_MEM_TO_DEV;
desc->dir = DMA_TO_DEVICE;
}
ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
if (ret == 0) {
ret = -ENOMEM;
goto err;
}
memset(&slave_conf, 0x00, sizeof(slave_conf));
slave_conf.device_fc = flow_control;
if (read) {
slave_conf.src_maxburst = 16;
slave_conf.src_addr = nandc->base_dma + reg_off;
if (nandc->data_crci) {
periph_conf.crci = nandc->data_crci;
slave_conf.peripheral_config = &periph_conf;
slave_conf.peripheral_size = sizeof(periph_conf);
}
} else {
slave_conf.dst_maxburst = 16;
slave_conf.dst_addr = nandc->base_dma + reg_off;
if (nandc->cmd_crci) {
periph_conf.crci = nandc->cmd_crci;
slave_conf.peripheral_config = &periph_conf;
slave_conf.peripheral_size = sizeof(periph_conf);
}
}
ret = dmaengine_slave_config(nandc->chan, &slave_conf);
if (ret) {
dev_err(nandc->dev, "failed to configure dma channel\n");
goto err;
}
dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
if (!dma_desc) {
dev_err(nandc->dev, "failed to prepare desc\n");
ret = -EINVAL;
goto err;
}
desc->dma_desc = dma_desc;
list_add_tail(&desc->node, &nandc->desc_list);
return 0;
err:
kfree(desc);
return ret;
}
/*
* read_reg_dma: prepares a descriptor to read a given number of
* contiguous registers to the reg_read_buf pointer
*
* @first: offset of the first register in the contiguous block
* @num_regs: number of registers to read
* @flags: flags to control DMA descriptor preparation
*/
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
int num_regs, unsigned int flags)
{
bool flow_control = false;
void *vaddr;
vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
nandc->reg_read_pos += num_regs;
if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
first = dev_cmd_reg_addr(nandc, first);
if (nandc->props->is_bam)
return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
num_regs, flags);
if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
flow_control = true;
return prep_adm_dma_desc(nandc, true, first, vaddr,
num_regs * sizeof(u32), flow_control);
}
/*
* write_reg_dma: prepares a descriptor to write a given number of
* contiguous registers
*
* @first: offset of the first register in the contiguous block
* @num_regs: number of registers to write
* @flags: flags to control DMA descriptor preparation
*/
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
int num_regs, unsigned int flags)
{
bool flow_control = false;
struct nandc_regs *regs = nandc->regs;
void *vaddr;
vaddr = offset_to_nandc_reg(regs, first);
if (first == NAND_ERASED_CW_DETECT_CFG) {
if (flags & NAND_ERASED_CW_SET)
vaddr = &regs->erased_cw_detect_cfg_set;
else
vaddr = &regs->erased_cw_detect_cfg_clr;
}
if (first == NAND_EXEC_CMD)
flags |= NAND_BAM_NWD;
if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
if (nandc->props->is_bam)
return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
num_regs, flags);
if (first == NAND_FLASH_CMD)
flow_control = true;
return prep_adm_dma_desc(nandc, false, first, vaddr,
num_regs * sizeof(u32), flow_control);
}
/*
* read_data_dma: prepares a DMA descriptor to transfer data from the
* controller's internal buffer to the buffer 'vaddr'
*
* @reg_off: offset within the controller's data buffer
* @vaddr: virtual address of the buffer we want to write to
* @size: DMA transaction size in bytes
* @flags: flags to control DMA descriptor preparation
*/
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
const u8 *vaddr, int size, unsigned int flags)
{
if (nandc->props->is_bam)
return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}
/*
* write_data_dma: prepares a DMA descriptor to transfer data from
* 'vaddr' to the controller's internal buffer
*
* @reg_off: offset within the controller's data buffer
* @vaddr: virtual address of the buffer we want to read from
* @size: DMA transaction size in bytes
* @flags: flags to control DMA descriptor preparation
*/
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
const u8 *vaddr, int size, unsigned int flags)
{
if (nandc->props->is_bam)
return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}
/*
* Helper to prepare DMA descriptors for configuring registers
* before reading a NAND page.
*/
static void config_nand_page_read(struct nand_chip *chip)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
write_reg_dma(nandc, NAND_ADDR0, 2, 0);
write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
if (!nandc->props->qpic_v2)
write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}
/*
* Helper to prepare DMA descriptors for configuring registers
* before reading each codeword in NAND page.
*/
static void
config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int reg = NAND_READ_LOCATION_0;
if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
reg = NAND_READ_LOCATION_LAST_CW_0;
if (nandc->props->is_bam)
write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);
write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
if (use_ecc) {
read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
NAND_BAM_NEXT_SGL);
} else {
read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
}
}
/*
* Helper to prepare dma descriptors to configure registers needed for reading a
* single codeword in a page
*/
static void
config_nand_single_cw_page_read(struct nand_chip *chip,
bool use_ecc, int cw)
{
config_nand_page_read(chip);
config_nand_cw_read(chip, use_ecc, cw);
}
/*
* Helper to prepare DMA descriptors used to configure registers needed
* before writing a NAND page.
*/
static void config_nand_page_write(struct nand_chip *chip)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
write_reg_dma(nandc, NAND_ADDR0, 2, 0);
write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
if (!nandc->props->qpic_v2)
write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
NAND_BAM_NEXT_SGL);
}
/*
* Helper to prepare DMA descriptors for configuring registers
* before writing each codeword in NAND page.
*/
static void config_nand_cw_write(struct nand_chip *chip)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}
/* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
struct desc_info *desc, *n;
dma_cookie_t cookie = 0;
struct bam_transaction *bam_txn = nandc->bam_txn;
int ret = 0;
if (nandc->props->is_bam) {
if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
if (ret)
goto err_unmap_free_desc;
}
if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
DMA_PREP_INTERRUPT);
if (ret)
goto err_unmap_free_desc;
}
if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
DMA_PREP_CMD);
if (ret)
goto err_unmap_free_desc;
}
}
list_for_each_entry(desc, &nandc->desc_list, node)
cookie = dmaengine_submit(desc->dma_desc);
if (nandc->props->is_bam) {
bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
bam_txn->last_cmd_desc->callback_param = bam_txn;
if (bam_txn->last_data_desc) {
bam_txn->last_data_desc->callback = qpic_bam_dma_done;
bam_txn->last_data_desc->callback_param = bam_txn;
bam_txn->wait_second_completion = true;
}
dma_async_issue_pending(nandc->tx_chan);
dma_async_issue_pending(nandc->rx_chan);
dma_async_issue_pending(nandc->cmd_chan);
if (!wait_for_completion_timeout(&bam_txn->txn_done,
QPIC_NAND_COMPLETION_TIMEOUT))
ret = -ETIMEDOUT;
} else {
if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
ret = -ETIMEDOUT;
}
err_unmap_free_desc:
/*
* Unmap the dma sg_list and free the desc allocated by both
* prepare_bam_async_desc() and prep_adm_dma_desc() functions.
*/
list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
list_del(&desc->node);
if (nandc->props->is_bam)
dma_unmap_sg(nandc->dev, desc->bam_sgl,
desc->sgl_cnt, desc->dir);
else
dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
desc->dir);
kfree(desc);
}
return ret;
}
/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
nandc->reg_read_pos = 0;
nandc_read_buffer_sync(nandc, false);
}
/*
* when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
* an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
*
* when using RS ECC, the HW reports the same errors when reading an erased CW,
* but it notifies that it is an erased CW by placing special characters at
* certain offsets in the buffer.
*
* verify if the page is erased or not, and fix up the page for RS ECC by
* replacing the special characters with 0xff.
*/
static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
{
u8 empty1, empty2;
/*
* an erased page flags an error in NAND_FLASH_STATUS, check if the page
* is erased by looking for 0x54s at offsets 3 and 175 from the
* beginning of each codeword
*/
empty1 = data_buf[3];
empty2 = data_buf[175];
/*
* if the erased codeword markers exist, override them with
* 0xffs
*/
if ((empty1 == 0x54 && empty2 == 0xff) ||
(empty1 == 0xff && empty2 == 0x54)) {
data_buf[3] = 0xff;
data_buf[175] = 0xff;
}
/*
* check if the entire chunk contains 0xffs or not. if it doesn't, then
* restore the original values at the special offsets
*/
if (memchr_inv(data_buf, 0xff, data_len)) {
data_buf[3] = empty1;
data_buf[175] = empty2;
return false;
}
return true;
}
struct read_stats {
__le32 flash;
__le32 buffer;
__le32 erased_cw;
};
/* reads back FLASH_STATUS register set by the controller */
static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
{
struct nand_chip *chip = &host->chip;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
int i;
nandc_read_buffer_sync(nandc, true);
for (i = 0; i < cw_cnt; i++) {
u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
if (flash & (FS_OP_ERR | FS_MPU_ERR))
return -EIO;
}
return 0;
}
/* performs raw read for one codeword */
static int
qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
u8 *data_buf, u8 *oob_buf, int page, int cw)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int data_size1, data_size2, oob_size1, oob_size2;
int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
int raw_cw = cw;
nand_read_page_op(chip, page, 0, NULL, 0);
nandc->buf_count = 0;
nandc->buf_start = 0;
clear_read_regs(nandc);
host->use_ecc = false;
if (nandc->props->qpic_v2)
raw_cw = ecc->steps - 1;
clear_bam_transaction(nandc);
set_address(host, host->cw_size * cw, page);
update_rw_regs(host, 1, true, raw_cw);
config_nand_page_read(chip);
data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
oob_size1 = host->bbm_size;
if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
data_size2 = ecc->size - data_size1 -
((ecc->steps - 1) * 4);
oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
host->spare_bytes;
} else {
data_size2 = host->cw_data - data_size1;
oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
}
if (nandc->props->is_bam) {
nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
read_loc += data_size1;
nandc_set_read_loc(chip, cw, 1, read_loc, oob_size1, 0);
read_loc += oob_size1;
nandc_set_read_loc(chip, cw, 2, read_loc, data_size2, 0);
read_loc += data_size2;
nandc_set_read_loc(chip, cw, 3, read_loc, oob_size2, 1);
}
config_nand_cw_read(chip, false, raw_cw);
read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
reg_off += data_size1;
read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
reg_off += oob_size1;
read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
reg_off += data_size2;
read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
ret = submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
return ret;
}
return check_flash_errors(host, 1);
}
/*
* Bitflips can happen in erased codewords also so this function counts the
* number of 0 in each CW for which ECC engine returns the uncorrectable
* error. The page will be assumed as erased if this count is less than or
* equal to the ecc->strength for each CW.
*
* 1. Both DATA and OOB need to be checked for number of 0. The
* top-level API can be called with only data buf or OOB buf so use
* chip->data_buf if data buf is null and chip->oob_poi if oob buf
* is null for copying the raw bytes.
* 2. Perform raw read for all the CW which has uncorrectable errors.
* 3. For each CW, check the number of 0 in cw_data and usable OOB bytes.
* The BBM and spare bytes bit flip won’t affect the ECC so don’t check
* the number of bitflips in this area.
*/
static int
check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
u8 *oob_buf, unsigned long uncorrectable_cws,
int page, unsigned int max_bitflips)
{
struct nand_chip *chip = &host->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *cw_data_buf, *cw_oob_buf;
int cw, data_size, oob_size, ret;
if (!data_buf)
data_buf = nand_get_data_buf(chip);
if (!oob_buf) {
nand_get_data_buf(chip);
oob_buf = chip->oob_poi;
}
for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
data_size = ecc->size - ((ecc->steps - 1) * 4);
oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
} else {
data_size = host->cw_data;
oob_size = host->ecc_bytes_hw;
}
/* determine starting buffer address for current CW */
cw_data_buf = data_buf + (cw * host->cw_data);
cw_oob_buf = oob_buf + (cw * ecc->bytes);
ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
cw_oob_buf, page, cw);
if (ret)
return ret;
/*
* make sure it isn't an erased page reported
* as not-erased by HW because of a few bitflips
*/
ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
cw_oob_buf + host->bbm_size,
oob_size, NULL,
0, ecc->strength);
if (ret < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += ret;
max_bitflips = max_t(unsigned int, max_bitflips, ret);
}
}
return max_bitflips;
}
/*
* reads back status registers set by the controller to notify page read
* errors. this is equivalent to what 'ecc->correct()' would do.
*/
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
u8 *oob_buf, int page)
{
struct nand_chip *chip = &host->chip;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
unsigned int max_bitflips = 0, uncorrectable_cws = 0;
struct read_stats *buf;
bool flash_op_err = false, erased;
int i;
u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
buf = (struct read_stats *)nandc->reg_read_buf;
nandc_read_buffer_sync(nandc, true);
for (i = 0; i < ecc->steps; i++, buf++) {
u32 flash, buffer, erased_cw;
int data_len, oob_len;
if (qcom_nandc_is_last_cw(ecc, i)) {
data_len = ecc->size - ((ecc->steps - 1) << 2);
oob_len = ecc->steps << 2;
} else {
data_len = host->cw_data;
oob_len = 0;
}
flash = le32_to_cpu(buf->flash);
buffer = le32_to_cpu(buf->buffer);
erased_cw = le32_to_cpu(buf->erased_cw);
/*
* Check ECC failure for each codeword. ECC failure can
* happen in either of the following conditions
* 1. If number of bitflips are greater than ECC engine
* capability.
* 2. If this codeword contains all 0xff for which erased
* codeword detection check will be done.
*/
if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
/*
* For BCH ECC, ignore erased codeword errors, if
* ERASED_CW bits are set.
*/
if (host->bch_enabled) {
erased = (erased_cw & ERASED_CW) == ERASED_CW;
/*
* For RS ECC, HW reports the erased CW by placing
* special characters at certain offsets in the buffer.
* These special characters will be valid only if
* complete page is read i.e. data_buf is not NULL.
*/
} else if (data_buf) {
erased = erased_chunk_check_and_fixup(data_buf,
data_len);
} else {
erased = false;
}
if (!erased)
uncorrectable_cws |= BIT(i);
/*
* Check if MPU or any other operational error (timeout,
* device failure, etc.) happened for this codeword and
* make flash_op_err true. If flash_op_err is set, then
* EIO will be returned for page read.
*/
} else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
flash_op_err = true;
/*
* No ECC or operational errors happened. Check the number of
* bits corrected and update the ecc_stats.corrected.
*/
} else {
unsigned int stat;
stat = buffer & BS_CORRECTABLE_ERR_MSK;
mtd->ecc_stats.corrected += stat;
max_bitflips = max(max_bitflips, stat);
}
if (data_buf)
data_buf += data_len;
if (oob_buf)
oob_buf += oob_len + ecc->bytes;
}
if (flash_op_err)
return -EIO;
if (!uncorrectable_cws)
return max_bitflips;
return check_for_erased_page(host, data_buf_start, oob_buf_start,
uncorrectable_cws, page,
max_bitflips);
}
/*
* helper to perform the actual page read operation, used by ecc->read_page(),
* ecc->read_oob()
*/
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
u8 *oob_buf, int page)
{
struct nand_chip *chip = &host->chip;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
int i, ret;
config_nand_page_read(chip);
/* queue cmd descs for each codeword */
for (i = 0; i < ecc->steps; i++) {
int data_size, oob_size;
if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
data_size = ecc->size - ((ecc->steps - 1) << 2);
oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
host->spare_bytes;
} else {
data_size = host->cw_data;
oob_size = host->ecc_bytes_hw + host->spare_bytes;
}
if (nandc->props->is_bam) {
if (data_buf && oob_buf) {
nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
nandc_set_read_loc(chip, i, 1, data_size,
oob_size, 1);
} else if (data_buf) {
nandc_set_read_loc(chip, i, 0, 0, data_size, 1);
} else {
nandc_set_read_loc(chip, i, 0, data_size,
oob_size, 1);
}
}
config_nand_cw_read(chip, true, i);
if (data_buf)
read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
data_size, 0);
/*
* when ecc is enabled, the controller doesn't read the real
* or dummy bad block markers in each chunk. To maintain a
* consistent layout across RAW and ECC reads, we just
* leave the real/dummy BBM offsets empty (i.e, filled with
* 0xffs)
*/
if (oob_buf) {
int j;
for (j = 0; j < host->bbm_size; j++)
*oob_buf++ = 0xff;
read_data_dma(nandc, FLASH_BUF_ACC + data_size,
oob_buf, oob_size, 0);
}
if (data_buf)
data_buf += data_size;
if (oob_buf)
oob_buf += oob_size;
}
ret = submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to read page/oob\n");
return ret;
}
return parse_read_errors(host, data_buf_start, oob_buf_start, page);
}
/*
* a helper that copies the last step/codeword of a page (containing free oob)
* into our local buffer
*/
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
struct nand_chip *chip = &host->chip;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int size;
int ret;
clear_read_regs(nandc);
size = host->use_ecc ? host->cw_data : host->cw_size;
/* prepare a clean read buffer */
memset(nandc->data_buffer, 0xff, size);
set_address(host, host->cw_size * (ecc->steps - 1), page);
update_rw_regs(host, 1, true, ecc->steps - 1);
config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
ret = submit_descs(nandc);
if (ret)
dev_err(nandc->dev, "failed to copy last codeword\n");
return ret;
}
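/*
* Check whether @page falls inside one of the special boot partitions,
* which use the fixed-up (512 data bytes per codeword) layout.
*/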
static bool qcom_nandc_is_boot_partition(struct qcom_nand_host *host, int page)
{
struct qcom_nand_boot_partition *boot_partition;
u32 start, end;
int i;
/*
* Since the frequent access will be to the non-boot partitions like rootfs,
* optimize the page check by:
*
* 1. Checking if the page lies after the last boot partition.
* 2. Checking from the boot partition end.
*/
/* First check the last boot partition */
boot_partition = &host->boot_partitions[host->nr_boot_partitions - 1];
start = boot_partition->page_offset;
end = start + boot_partition->page_size;
/* Page is after the last boot partition end. This is NOT a boot partition */
if (page > end)
return false;
/* Actually check if it's a boot partition */
if (page < end && page >= start)
return true;
/* Check the other boot partitions starting from the second-last partition */
for (i = host->nr_boot_partitions - 2; i >= 0; i--) {
boot_partition = &host->boot_partitions[i];
start = boot_partition->page_offset;
end = start + boot_partition->page_size;
if (page < end && page >= start)
return true;
}
return false;
}
static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page)
{
bool codeword_fixup = qcom_nandc_is_boot_partition(host, page);
/* Skip conf write if we are already in the correct mode */
if (codeword_fixup == host->codeword_fixup)
return;
host->codeword_fixup = codeword_fixup;
host->cw_data = codeword_fixup ? 512 : 516;
host->spare_bytes = host->cw_size - host->ecc_bytes_hw -
host->bbm_size - host->cw_data;
host->cfg0 &= ~(SPARE_SIZE_BYTES_MASK | UD_SIZE_BYTES_MASK);
host->cfg0 |= host->spare_bytes << SPARE_SIZE_BYTES |
host->cw_data << UD_SIZE_BYTES;
host->ecc_bch_cfg &= ~ECC_NUM_DATA_BYTES_MASK;
host->ecc_bch_cfg |= host->cw_data << ECC_NUM_DATA_BYTES;
host->ecc_buf_cfg = (host->cw_data - 1) << NUM_STEPS;
}
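/*
 * Illustrative numbers for the fixup above (a sketch, assuming 4-bit BCH on
 * an 8-bit bus, i.e. cw_size = 528, ecc_bytes_hw = 7, bbm_size = 1):
 *
 *   normal mode: cw_data = 516 -> spare_bytes = 528 - 7 - 1 - 516 = 4
 *   boot mode:   cw_data = 512 -> spare_bytes = 528 - 7 - 1 - 512 = 8
 *
 * Only the UD/SPARE size fields in cfg0 and the ECC data-byte count change;
 * the codeword size itself stays the same.
 */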
/* implements ecc->read_page() */
static int qcom_nandc_read_page(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *data_buf, *oob_buf = NULL;
if (host->nr_boot_partitions)
qcom_nandc_codeword_fixup(host, page);
nand_read_page_op(chip, page, 0, NULL, 0);
nandc->buf_count = 0;
nandc->buf_start = 0;
host->use_ecc = true;
clear_read_regs(nandc);
set_address(host, 0, page);
update_rw_regs(host, ecc->steps, true, 0);
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
clear_bam_transaction(nandc);
return read_page_ecc(host, data_buf, oob_buf, page);
}
/* implements ecc->read_page_raw() */
static int qcom_nandc_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int cw, ret;
u8 *data_buf = buf, *oob_buf = chip->oob_poi;
if (host->nr_boot_partitions)
qcom_nandc_codeword_fixup(host, page);
for (cw = 0; cw < ecc->steps; cw++) {
ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
page, cw);
if (ret)
return ret;
data_buf += host->cw_data;
oob_buf += ecc->bytes;
}
return 0;
}
/* implements ecc->read_oob() */
static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
if (host->nr_boot_partitions)
qcom_nandc_codeword_fixup(host, page);
clear_read_regs(nandc);
clear_bam_transaction(nandc);
host->use_ecc = true;
set_address(host, 0, page);
update_rw_regs(host, ecc->steps, true, 0);
return read_page_ecc(host, NULL, chip->oob_poi, page);
}
/* implements ecc->write_page() */
static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *data_buf, *oob_buf;
int i, ret;
if (host->nr_boot_partitions)
qcom_nandc_codeword_fixup(host, page);
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
set_address(host, 0, page);
nandc->buf_count = 0;
nandc->buf_start = 0;
clear_read_regs(nandc);
clear_bam_transaction(nandc);
data_buf = (u8 *)buf;
oob_buf = chip->oob_poi;
host->use_ecc = true;
update_rw_regs(host, ecc->steps, false, 0);
config_nand_page_write(chip);
for (i = 0; i < ecc->steps; i++) {
int data_size, oob_size;
if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
data_size = ecc->size - ((ecc->steps - 1) << 2);
oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
host->spare_bytes;
} else {
data_size = host->cw_data;
oob_size = ecc->bytes;
}
write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
/*
* when ECC is enabled, we don't really need to write anything
* to oob for the first n - 1 codewords since these oob regions
* just contain ECC bytes that's written by the controller
* itself. For the last codeword, we skip the bbm positions and
* write to the free oob area.
*/
if (qcom_nandc_is_last_cw(ecc, i)) {
oob_buf += host->bbm_size;
write_data_dma(nandc, FLASH_BUF_ACC + data_size,
oob_buf, oob_size, 0);
}
config_nand_cw_write(chip);
data_buf += data_size;
oob_buf += oob_size;
}
ret = submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to write page\n");
return ret;
}
return nand_prog_page_end_op(chip);
}
/* implements ecc->write_page_raw() */
static int qcom_nandc_write_page_raw(struct nand_chip *chip,
const u8 *buf, int oob_required,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *data_buf, *oob_buf;
int i, ret;
if (host->nr_boot_partitions)
qcom_nandc_codeword_fixup(host, page);
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
clear_read_regs(nandc);
clear_bam_transaction(nandc);
data_buf = (u8 *)buf;
oob_buf = chip->oob_poi;
host->use_ecc = false;
update_rw_regs(host, ecc->steps, false, 0);
config_nand_page_write(chip);
for (i = 0; i < ecc->steps; i++) {
int data_size1, data_size2, oob_size1, oob_size2;
int reg_off = FLASH_BUF_ACC;
data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
oob_size1 = host->bbm_size;
if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
data_size2 = ecc->size - data_size1 -
((ecc->steps - 1) << 2);
oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
host->spare_bytes;
} else {
data_size2 = host->cw_data - data_size1;
oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
}
write_data_dma(nandc, reg_off, data_buf, data_size1,
NAND_BAM_NO_EOT);
reg_off += data_size1;
data_buf += data_size1;
write_data_dma(nandc, reg_off, oob_buf, oob_size1,
NAND_BAM_NO_EOT);
reg_off += oob_size1;
oob_buf += oob_size1;
write_data_dma(nandc, reg_off, data_buf, data_size2,
NAND_BAM_NO_EOT);
reg_off += data_size2;
data_buf += data_size2;
write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
oob_buf += oob_size2;
config_nand_cw_write(chip);
}
ret = submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to write raw page\n");
return ret;
}
return nand_prog_page_end_op(chip);
}
/*
* implements ecc->write_oob()
*
* the NAND controller cannot write only data or only OOB within a codeword
* since ECC is calculated for the combined codeword. So update the OOB from
 * chip->oob_poi, and pad the data area with 0xFF before writing.
*/
static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *oob = chip->oob_poi;
int data_size, oob_size;
int ret;
if (host->nr_boot_partitions)
qcom_nandc_codeword_fixup(host, page);
host->use_ecc = true;
clear_bam_transaction(nandc);
/* calculate the data and oob size for the last codeword/step */
data_size = ecc->size - ((ecc->steps - 1) << 2);
oob_size = mtd->oobavail;
memset(nandc->data_buffer, 0xff, host->cw_data);
	/* copy the new oob content into the last codeword */
mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
0, mtd->oobavail);
set_address(host, host->cw_size * (ecc->steps - 1), page);
update_rw_regs(host, 1, false, 0);
config_nand_page_write(chip);
write_data_dma(nandc, FLASH_BUF_ACC,
nandc->data_buffer, data_size + oob_size, 0);
config_nand_cw_write(chip);
ret = submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to write oob\n");
return ret;
}
return nand_prog_page_end_op(chip);
}
static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int page, ret, bbpos, bad = 0;
page = (int)(ofs >> chip->page_shift) & chip->pagemask;
/*
	 * configure registers for a raw sub page read. The address is set to
	 * the beginning of the last codeword; we don't care about reading the
	 * ECC portion of the OOB, we just want the first few bytes of this
	 * codeword, which contain the BBM
*/
host->use_ecc = false;
clear_bam_transaction(nandc);
ret = copy_last_cw(host, page);
if (ret)
goto err;
if (check_flash_errors(host, 1)) {
dev_warn(nandc->dev, "error when trying to read BBM\n");
goto err;
}
bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
bad = nandc->data_buffer[bbpos] != 0xff;
if (chip->options & NAND_BUSWIDTH_16)
bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
err:
return bad;
}
static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int page, ret;
clear_read_regs(nandc);
clear_bam_transaction(nandc);
/*
	 * to mark the block as bad, we flash the entire last codeword with 0s.
* we don't care about the rest of the content in the codeword since
* we aren't going to use this block again
*/
memset(nandc->data_buffer, 0x00, host->cw_size);
page = (int)(ofs >> chip->page_shift) & chip->pagemask;
/* prepare write */
host->use_ecc = false;
set_address(host, host->cw_size * (ecc->steps - 1), page);
update_rw_regs(host, 1, false, ecc->steps - 1);
config_nand_page_write(chip);
write_data_dma(nandc, FLASH_BUF_ACC,
nandc->data_buffer, host->cw_size, 0);
config_nand_cw_write(chip);
ret = submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to update BBM\n");
return ret;
}
return nand_prog_page_end_op(chip);
}
/*
* NAND controller page layout info
*
* Layout with ECC enabled:
*
* |----------------------| |---------------------------------|
* | xx.......yy| | *********xx.......yy|
* | DATA xx..ECC..yy| | DATA **SPARE**xx..ECC..yy|
* | (516) xx.......yy| | (516-n*4) **(n*4)**xx.......yy|
* | xx.......yy| | *********xx.......yy|
* |----------------------| |---------------------------------|
* codeword 1,2..n-1 codeword n
* <---(528/532 Bytes)--> <-------(528/532 Bytes)--------->
*
* n = Number of codewords in the page
* . = ECC bytes
* * = Spare/free bytes
* x = Unused byte(s)
* y = Reserved byte(s)
*
* 2K page: n = 4, spare = 16 bytes
* 4K page: n = 8, spare = 32 bytes
* 8K page: n = 16, spare = 64 bytes
*
 * the qcom nand controller operates at a sub page/codeword level. each
 * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
 * the number of ECC bytes varies based on the ECC strength and the bus width.
 *
 * the first n - 1 codewords contain 516 bytes of user data each; the remaining
 * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
 * both user data and spare (oobavail) bytes that sum up to 516 bytes.
*
 * When we access a page with ECC enabled, the reserved byte(s) are not
* accessible at all. When reading, we fill up these unreadable positions
* with 0xffs. When writing, the controller skips writing the inaccessible
* bytes.
*
* Layout with ECC disabled:
*
* |------------------------------| |---------------------------------------|
* | yy xx.......| | bb *********xx.......|
* | DATA1 yy DATA2 xx..ECC..| | DATA1 bb DATA2 **SPARE**xx..ECC..|
* | (size1) yy (size2) xx.......| | (size1) bb (size2) **(n*4)**xx.......|
* | yy xx.......| | bb *********xx.......|
* |------------------------------| |---------------------------------------|
* codeword 1,2..n-1 codeword n
* <-------(528/532 Bytes)------> <-----------(528/532 Bytes)----------->
*
* n = Number of codewords in the page
* . = ECC bytes
* * = Spare/free bytes
* x = Unused byte(s)
 * y = Dummy Bad Block byte(s)
* b = Real Bad Block byte(s)
* size1/size2 = function of codeword size and 'n'
*
* when the ECC block is disabled, one reserved byte (or two for 16 bit bus
* width) is now accessible. For the first n - 1 codewords, these are dummy Bad
* Block Markers. In the last codeword, this position contains the real BBM
*
* In order to have a consistent layout between RAW and ECC modes, we assume
* the following OOB layout arrangement:
*
* |-----------| |--------------------|
* |yyxx.......| |bb*********xx.......|
* |yyxx..ECC..| |bb*FREEOOB*xx..ECC..|
* |yyxx.......| |bb*********xx.......|
* |yyxx.......| |bb*********xx.......|
* |-----------| |--------------------|
* first n - 1 nth OOB region
* OOB regions
*
* n = Number of codewords in the page
* . = ECC bytes
* * = FREE OOB bytes
* y = Dummy bad block byte(s) (inaccessible when ECC enabled)
* x = Unused byte(s)
* b = Real bad block byte(s) (inaccessible when ECC enabled)
*
* This layout is read as is when ECC is disabled. When ECC is enabled, the
* inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
* and assumed as 0xffs when we read a page/oob. The ECC, unused and
 * dummy/real bad block bytes are grouped as ecc bytes (i.e., ecc->bytes is
* the sum of the three).
*/
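/*
 * As a concrete illustration of the layout described above and of the
 * ooblayout helpers below (a sketch, assuming a 2K+64 page with 4-bit BCH on
 * an 8-bit bus, so n = 4, ecc->bytes = 7 + 4 + 1 = 12, bbm_size = 1):
 *
 *   ECC region 0: offset 0,  length 12 * 3 + 1 = 37
 *   free region:  offset 37, length 4 * 4      = 16
 *   ECC region 1: offset 53, length 7 + 4      = 11
 *
 * 37 + 16 + 11 = 64 bytes, i.e. the whole OOB area is accounted for.
 */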
static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
if (section > 1)
return -ERANGE;
if (!section) {
oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
host->bbm_size;
oobregion->offset = 0;
} else {
oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
oobregion->offset = mtd->oobsize - oobregion->length;
}
return 0;
}
static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
if (section)
return -ERANGE;
oobregion->length = ecc->steps * 4;
oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
return 0;
}
static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
.ecc = qcom_nand_ooblayout_ecc,
.free = qcom_nand_ooblayout_free,
};
static int
qcom_nandc_calc_ecc_bytes(int step_size, int strength)
{
return strength == 4 ? 12 : 16;
}
NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
NANDC_STEP_SIZE, 4, 8);
static int qcom_nand_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
int cwperpage, bad_block_byte, ret;
bool wide_bus;
int ecc_mode = 1;
/* controller only supports 512 bytes data steps */
ecc->size = NANDC_STEP_SIZE;
wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
cwperpage = mtd->writesize / NANDC_STEP_SIZE;
/*
	 * Each CW has 4 available OOB bytes which will be protected with ECC,
	 * so the remaining OOB bytes can be used for ECC parity.
*/
ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
mtd->oobsize - (cwperpage * 4));
if (ret) {
dev_err(nandc->dev, "No valid ECC settings possible\n");
return ret;
}
if (ecc->strength >= 8) {
/* 8 bit ECC defaults to BCH ECC on all platforms */
host->bch_enabled = true;
ecc_mode = 1;
if (wide_bus) {
host->ecc_bytes_hw = 14;
host->spare_bytes = 0;
host->bbm_size = 2;
} else {
host->ecc_bytes_hw = 13;
host->spare_bytes = 2;
host->bbm_size = 1;
}
} else {
/*
		 * if the controller supports BCH for 4 bit ECC, it uses fewer
		 * bytes for ECC. If RS is used, 10 ECC bytes are always
		 * needed
*/
if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
/* BCH */
host->bch_enabled = true;
ecc_mode = 0;
if (wide_bus) {
host->ecc_bytes_hw = 8;
host->spare_bytes = 2;
host->bbm_size = 2;
} else {
host->ecc_bytes_hw = 7;
host->spare_bytes = 4;
host->bbm_size = 1;
}
} else {
/* RS */
host->ecc_bytes_hw = 10;
if (wide_bus) {
host->spare_bytes = 0;
host->bbm_size = 2;
} else {
host->spare_bytes = 1;
host->bbm_size = 1;
}
}
}
/*
* we consider ecc->bytes as the sum of all the non-data content in a
* step. It gives us a clean representation of the oob area (even if
	 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
* ECC and 12 bytes for 4 bit ECC
*/
ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
ecc->read_page = qcom_nandc_read_page;
ecc->read_page_raw = qcom_nandc_read_page_raw;
ecc->read_oob = qcom_nandc_read_oob;
ecc->write_page = qcom_nandc_write_page;
ecc->write_page_raw = qcom_nandc_write_page_raw;
ecc->write_oob = qcom_nandc_write_oob;
ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
/* Free the initially allocated BAM transaction for reading the ONFI params */
if (nandc->props->is_bam)
free_bam_transaction(nandc);
nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
cwperpage);
/* Now allocate the BAM transaction based on updated max_cwperpage */
if (nandc->props->is_bam) {
nandc->bam_txn = alloc_bam_transaction(nandc);
if (!nandc->bam_txn) {
dev_err(nandc->dev,
"failed to allocate bam transaction\n");
return -ENOMEM;
}
}
/*
* DATA_UD_BYTES varies based on whether the read/write command protects
* spare data with ECC too. We protect spare data by default, so we set
* it to main + spare data, which are 512 and 4 bytes respectively.
*/
host->cw_data = 516;
/*
* total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
* for 8 bit ECC
*/
host->cw_size = host->cw_data + ecc->bytes;
bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
| host->cw_data << UD_SIZE_BYTES
| 0 << DISABLE_STATUS_AFTER_WRITE
| 5 << NUM_ADDR_CYCLES
| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
| 0 << STATUS_BFR_READ
| 1 << SET_RD_MODE_AFTER_STATUS
| host->spare_bytes << SPARE_SIZE_BYTES;
host->cfg1 = 7 << NAND_RECOVERY_CYCLES
| 0 << CS_ACTIVE_BSY
| bad_block_byte << BAD_BLOCK_BYTE_NUM
| 0 << BAD_BLOCK_IN_SPARE_AREA
| 2 << WR_RD_BSY_GAP
| wide_bus << WIDE_FLASH
| host->bch_enabled << ENABLE_BCH_ECC;
host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
| host->cw_size << UD_SIZE_BYTES
| 5 << NUM_ADDR_CYCLES
| 0 << SPARE_SIZE_BYTES;
host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
| 0 << CS_ACTIVE_BSY
| 17 << BAD_BLOCK_BYTE_NUM
| 1 << BAD_BLOCK_IN_SPARE_AREA
| 2 << WR_RD_BSY_GAP
| wide_bus << WIDE_FLASH
| 1 << DEV0_CFG1_ECC_DISABLE;
host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
| 0 << ECC_SW_RESET
| host->cw_data << ECC_NUM_DATA_BYTES
| 1 << ECC_FORCE_CLK_OPEN
| ecc_mode << ECC_MODE
| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
if (!nandc->props->qpic_v2)
host->ecc_buf_cfg = 0x203 << NUM_STEPS;
host->clrflashstatus = FS_READY_BSY_N;
host->clrreadstatus = 0xc0;
nandc->regs->erased_cw_detect_cfg_clr =
cpu_to_le32(CLR_ERASED_PAGE_DET);
nandc->regs->erased_cw_detect_cfg_set =
cpu_to_le32(SET_ERASED_PAGE_DET);
dev_dbg(nandc->dev,
"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
cwperpage);
return 0;
}
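/*
 * Worked example of the geometry computed in qcom_nand_attach_chip() (a
 * sketch, assuming a 2K+64 page on an 8-bit bus with a BCH-capable
 * controller):
 *
 *   cwperpage      = 2048 / 512 = 4
 *   usable OOB     = 64 - 4 * 4 = 48 -> strength 4 (4 * 12 = 48 fits,
 *                    4 * 16 = 64 for strength 8 does not)
 *   ecc->bytes     = 7 + 4 + 1 = 12, cw_data = 516, cw_size = 528
 *   bad_block_byte = 2048 - 528 * 3 + 1 = 465
 */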
static int qcom_op_cmd_mapping(struct nand_chip *chip, u8 opcode,
struct qcom_op *q_op)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
int cmd;
switch (opcode) {
case NAND_CMD_RESET:
cmd = OP_RESET_DEVICE;
break;
case NAND_CMD_READID:
cmd = OP_FETCH_ID;
break;
case NAND_CMD_PARAM:
if (nandc->props->qpic_v2)
cmd = OP_PAGE_READ_ONFI_READ;
else
cmd = OP_PAGE_READ;
break;
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
cmd = OP_BLOCK_ERASE;
break;
case NAND_CMD_STATUS:
cmd = OP_CHECK_STATUS;
break;
case NAND_CMD_PAGEPROG:
cmd = OP_PROGRAM_PAGE;
q_op->flag = OP_PROGRAM_PAGE;
nandc->exec_opwrite = true;
break;
case NAND_CMD_READ0:
case NAND_CMD_READSTART:
if (host->use_ecc)
cmd = OP_PAGE_READ_WITH_ECC;
else
cmd = OP_PAGE_READ;
break;
default:
dev_err(nandc->dev, "Opcode not supported: %u\n", opcode);
return -EOPNOTSUPP;
}
return cmd;
}
/* NAND framework ->exec_op() hooks and related helpers */
static int qcom_parse_instructions(struct nand_chip *chip,
const struct nand_subop *subop,
struct qcom_op *q_op)
{
const struct nand_op_instr *instr = NULL;
unsigned int op_id;
int i, ret;
for (op_id = 0; op_id < subop->ninstrs; op_id++) {
unsigned int offset, naddrs;
const u8 *addrs;
instr = &subop->instrs[op_id];
switch (instr->type) {
case NAND_OP_CMD_INSTR:
ret = qcom_op_cmd_mapping(chip, instr->ctx.cmd.opcode, q_op);
if (ret < 0)
return ret;
q_op->cmd_reg = ret;
q_op->rdy_delay_ns = instr->delay_ns;
break;
case NAND_OP_ADDR_INSTR:
offset = nand_subop_get_addr_start_off(subop, op_id);
naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
addrs = &instr->ctx.addr.addrs[offset];
for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
q_op->addr1_reg |= addrs[i] << (i * 8);
if (naddrs > 4)
q_op->addr2_reg |= addrs[4];
q_op->rdy_delay_ns = instr->delay_ns;
break;
case NAND_OP_DATA_IN_INSTR:
q_op->data_instr = instr;
q_op->data_instr_idx = op_id;
q_op->rdy_delay_ns = instr->delay_ns;
fallthrough;
case NAND_OP_DATA_OUT_INSTR:
q_op->rdy_delay_ns = instr->delay_ns;
break;
case NAND_OP_WAITRDY_INSTR:
q_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
q_op->rdy_delay_ns = instr->delay_ns;
break;
}
}
return 0;
}
static void qcom_delay_ns(unsigned int ns)
{
if (!ns)
return;
if (ns < 10000)
ndelay(ns);
else
udelay(DIV_ROUND_UP(ns, 1000));
}
static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
unsigned long start = jiffies + msecs_to_jiffies(time_ms);
u32 flash;
nandc_read_buffer_sync(nandc, true);
do {
flash = le32_to_cpu(nandc->reg_read_buf[0]);
if (flash & FS_READY_BSY_N)
return 0;
cpu_relax();
} while (time_after(start, jiffies));
dev_err(nandc->dev, "Timeout waiting for device to be ready:0x%08x\n", flash);
return -ETIMEDOUT;
}
static int qcom_read_status_exec(struct nand_chip *chip,
const struct nand_subop *subop)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
struct qcom_op q_op = {};
const struct nand_op_instr *instr = NULL;
unsigned int op_id = 0;
unsigned int len = 0;
int ret, num_cw, i;
u32 flash_status;
host->status = NAND_STATUS_READY | NAND_STATUS_WP;
ret = qcom_parse_instructions(chip, subop, &q_op);
if (ret)
return ret;
num_cw = nandc->exec_opwrite ? ecc->steps : 1;
nandc->exec_opwrite = false;
nandc->buf_count = 0;
nandc->buf_start = 0;
host->use_ecc = false;
clear_read_regs(nandc);
clear_bam_transaction(nandc);
nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
nandc_set_reg(chip, NAND_EXEC_CMD, 1);
write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
ret = submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting status descriptor\n");
goto err_out;
}
nandc_read_buffer_sync(nandc, true);
for (i = 0; i < num_cw; i++) {
flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
if (flash_status & FS_MPU_ERR)
host->status &= ~NAND_STATUS_WP;
if (flash_status & FS_OP_ERR ||
(i == (num_cw - 1) && (flash_status & FS_DEVICE_STS_ERR)))
host->status |= NAND_STATUS_FAIL;
}
flash_status = host->status;
instr = q_op.data_instr;
op_id = q_op.data_instr_idx;
len = nand_subop_get_data_len(subop, op_id);
memcpy(instr->ctx.data.buf.in, &flash_status, len);
err_out:
return ret;
}
static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_op q_op = {};
const struct nand_op_instr *instr = NULL;
unsigned int op_id = 0;
unsigned int len = 0;
int ret;
ret = qcom_parse_instructions(chip, subop, &q_op);
if (ret)
return ret;
nandc->buf_count = 0;
nandc->buf_start = 0;
host->use_ecc = false;
clear_read_regs(nandc);
clear_bam_transaction(nandc);
nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
nandc->props->is_bam ? 0 : DM_EN);
nandc_set_reg(chip, NAND_EXEC_CMD, 1);
write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
ret = submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting read id descriptor\n");
goto err_out;
}
instr = q_op.data_instr;
op_id = q_op.data_instr_idx;
len = nand_subop_get_data_len(subop, op_id);
nandc_read_buffer_sync(nandc, true);
memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
err_out:
return ret;
}
static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_op q_op = {};
int ret;
int instrs = 1;
ret = qcom_parse_instructions(chip, subop, &q_op);
if (ret)
return ret;
if (q_op.flag == OP_PROGRAM_PAGE) {
goto wait_rdy;
} else if (q_op.cmd_reg == OP_BLOCK_ERASE) {
q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
nandc_set_reg(chip, NAND_DEV0_CFG0,
host->cfg0_raw & ~(7 << CW_PER_PAGE));
nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
instrs = 3;
} else {
return 0;
}
nandc->buf_count = 0;
nandc->buf_start = 0;
host->use_ecc = false;
clear_read_regs(nandc);
clear_bam_transaction(nandc);
nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
nandc_set_reg(chip, NAND_EXEC_CMD, 1);
write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
	if (q_op.cmd_reg == OP_BLOCK_ERASE)
		write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
	else
		read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
ret = submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting misc descriptor\n");
goto err_out;
}
wait_rdy:
qcom_delay_ns(q_op.rdy_delay_ns);
ret = qcom_wait_rdy_poll(chip, q_op.rdy_timeout_ms);
err_out:
return ret;
}
static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct qcom_op q_op = {};
const struct nand_op_instr *instr = NULL;
unsigned int op_id = 0;
unsigned int len = 0;
int ret;
ret = qcom_parse_instructions(chip, subop, &q_op);
if (ret)
return ret;
q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
nandc->buf_count = 0;
nandc->buf_start = 0;
host->use_ecc = false;
clear_read_regs(nandc);
clear_bam_transaction(nandc);
nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
nandc_set_reg(chip, NAND_ADDR0, 0);
nandc_set_reg(chip, NAND_ADDR1, 0);
nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
| 512 << UD_SIZE_BYTES
| 5 << NUM_ADDR_CYCLES
| 0 << SPARE_SIZE_BYTES);
nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
| 0 << CS_ACTIVE_BSY
| 17 << BAD_BLOCK_BYTE_NUM
| 1 << BAD_BLOCK_IN_SPARE_AREA
| 2 << WR_RD_BSY_GAP
| 0 << WIDE_FLASH
| 1 << DEV0_CFG1_ECC_DISABLE);
if (!nandc->props->qpic_v2)
nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
/* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
if (!nandc->props->qpic_v2) {
nandc_set_reg(chip, NAND_DEV_CMD_VLD,
(nandc->vld & ~READ_START_VLD));
nandc_set_reg(chip, NAND_DEV_CMD1,
(nandc->cmd1 & ~(0xFF << READ_ADDR))
| NAND_CMD_PARAM << READ_ADDR);
}
nandc_set_reg(chip, NAND_EXEC_CMD, 1);
if (!nandc->props->qpic_v2) {
nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
}
instr = q_op.data_instr;
op_id = q_op.data_instr_idx;
len = nand_subop_get_data_len(subop, op_id);
nandc_set_read_loc(chip, 0, 0, 0, len, 1);
if (!nandc->props->qpic_v2) {
write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
}
nandc->buf_count = len;
memset(nandc->data_buffer, 0xff, nandc->buf_count);
config_nand_single_cw_page_read(chip, false, 0);
read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
nandc->buf_count, 0);
/* restore CMD1 and VLD regs */
if (!nandc->props->qpic_v2) {
write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
}
ret = submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting param page descriptor\n");
goto err_out;
}
ret = qcom_wait_rdy_poll(chip, q_op.rdy_timeout_ms);
if (ret)
goto err_out;
memcpy(instr->ctx.data.buf.in, nandc->data_buffer, len);
err_out:
return ret;
}
static const struct nand_op_parser qcom_op_parser = NAND_OP_PARSER(
NAND_OP_PARSER_PATTERN(
qcom_read_id_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
NAND_OP_PARSER_PATTERN(
qcom_read_status_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
NAND_OP_PARSER_PATTERN(
qcom_param_page_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 512)),
NAND_OP_PARSER_PATTERN(
qcom_misc_cmd_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYCLE),
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
);
static int qcom_check_op(struct nand_chip *chip,
const struct nand_operation *op)
{
const struct nand_op_instr *instr;
int op_id;
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];
switch (instr->type) {
case NAND_OP_CMD_INSTR:
if (instr->ctx.cmd.opcode != NAND_CMD_RESET &&
instr->ctx.cmd.opcode != NAND_CMD_READID &&
instr->ctx.cmd.opcode != NAND_CMD_PARAM &&
instr->ctx.cmd.opcode != NAND_CMD_ERASE1 &&
instr->ctx.cmd.opcode != NAND_CMD_ERASE2 &&
instr->ctx.cmd.opcode != NAND_CMD_STATUS &&
instr->ctx.cmd.opcode != NAND_CMD_PAGEPROG &&
instr->ctx.cmd.opcode != NAND_CMD_READ0 &&
instr->ctx.cmd.opcode != NAND_CMD_READSTART)
return -EOPNOTSUPP;
break;
default:
break;
}
}
return 0;
}
static int qcom_nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op, bool check_only)
{
if (check_only)
return qcom_check_op(chip, op);
return nand_op_parser_exec_op(chip, &qcom_op_parser, op, check_only);
}
static const struct nand_controller_ops qcom_nandc_ops = {
.attach_chip = qcom_nand_attach_chip,
.exec_op = qcom_nand_exec_op,
};
static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
if (nandc->props->is_bam) {
if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
dma_unmap_single(nandc->dev, nandc->reg_read_dma,
MAX_REG_RD *
sizeof(*nandc->reg_read_buf),
DMA_FROM_DEVICE);
if (nandc->tx_chan)
dma_release_channel(nandc->tx_chan);
if (nandc->rx_chan)
dma_release_channel(nandc->rx_chan);
if (nandc->cmd_chan)
dma_release_channel(nandc->cmd_chan);
} else {
if (nandc->chan)
dma_release_channel(nandc->chan);
}
}
static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
int ret;
ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(nandc->dev, "failed to set DMA mask\n");
return ret;
}
/*
* we use the internal buffer for reading ONFI params, reading small
	 * data like ID and status, and performing read-copy-write operations
* when writing to a codeword partially. 532 is the maximum possible
* size of a codeword for our nand controller
*/
nandc->buf_size = 532;
nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
if (!nandc->data_buffer)
return -ENOMEM;
nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
if (!nandc->regs)
return -ENOMEM;
nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
sizeof(*nandc->reg_read_buf),
GFP_KERNEL);
if (!nandc->reg_read_buf)
return -ENOMEM;
if (nandc->props->is_bam) {
nandc->reg_read_dma =
dma_map_single(nandc->dev, nandc->reg_read_buf,
MAX_REG_RD *
sizeof(*nandc->reg_read_buf),
DMA_FROM_DEVICE);
if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
return -EIO;
}
nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
if (IS_ERR(nandc->tx_chan)) {
ret = PTR_ERR(nandc->tx_chan);
nandc->tx_chan = NULL;
dev_err_probe(nandc->dev, ret,
"tx DMA channel request failed\n");
goto unalloc;
}
nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
if (IS_ERR(nandc->rx_chan)) {
ret = PTR_ERR(nandc->rx_chan);
nandc->rx_chan = NULL;
dev_err_probe(nandc->dev, ret,
"rx DMA channel request failed\n");
goto unalloc;
}
nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
if (IS_ERR(nandc->cmd_chan)) {
ret = PTR_ERR(nandc->cmd_chan);
nandc->cmd_chan = NULL;
dev_err_probe(nandc->dev, ret,
"cmd DMA channel request failed\n");
goto unalloc;
}
/*
* Initially allocate BAM transaction to read ONFI param page.
* After detecting all the devices, this BAM transaction will
* be freed and the next BAM transaction will be allocated with
* maximum codeword size
*/
nandc->max_cwperpage = 1;
nandc->bam_txn = alloc_bam_transaction(nandc);
if (!nandc->bam_txn) {
dev_err(nandc->dev,
"failed to allocate bam transaction\n");
ret = -ENOMEM;
goto unalloc;
}
} else {
nandc->chan = dma_request_chan(nandc->dev, "rxtx");
if (IS_ERR(nandc->chan)) {
ret = PTR_ERR(nandc->chan);
nandc->chan = NULL;
dev_err_probe(nandc->dev, ret,
"rxtx DMA channel request failed\n");
return ret;
}
}
INIT_LIST_HEAD(&nandc->desc_list);
INIT_LIST_HEAD(&nandc->host_list);
nand_controller_init(&nandc->controller);
nandc->controller.ops = &qcom_nandc_ops;
return 0;
unalloc:
qcom_nandc_unalloc(nandc);
return ret;
}
/* one time setup of a few nand controller registers */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
u32 nand_ctrl;
/* kill onenand */
if (!nandc->props->is_qpic)
nandc_write(nandc, SFLASHC_BURST_CFG, 0);
if (!nandc->props->qpic_v2)
nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
NAND_DEV_CMD_VLD_VAL);
/* enable ADM or BAM DMA */
if (nandc->props->is_bam) {
nand_ctrl = nandc_read(nandc, NAND_CTRL);
/*
		 * NAND_CTRL is an operational register, and CPU access to
		 * operational registers is read-only in BAM mode. So update
		 * the NAND_CTRL register only if it is not already in BAM
		 * mode. In most cases BAM mode will have been enabled by the
		 * bootloader.
*/
if (!(nand_ctrl & BAM_MODE_EN))
nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
} else {
nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
}
/* save the original values of these registers */
if (!nandc->props->qpic_v2) {
nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
nandc->vld = NAND_DEV_CMD_VLD_VAL;
}
return 0;
}
static const char * const probes[] = { "cmdlinepart", "ofpart", "qcomsmem", NULL };
static int qcom_nand_host_parse_boot_partitions(struct qcom_nand_controller *nandc,
struct qcom_nand_host *host,
struct device_node *dn)
{
struct nand_chip *chip = &host->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
struct qcom_nand_boot_partition *boot_partition;
struct device *dev = nandc->dev;
int partitions_count, i, j, ret;
if (!of_property_present(dn, "qcom,boot-partitions"))
return 0;
partitions_count = of_property_count_u32_elems(dn, "qcom,boot-partitions");
if (partitions_count <= 0) {
dev_err(dev, "Error parsing boot partition\n");
return partitions_count ? partitions_count : -EINVAL;
}
host->nr_boot_partitions = partitions_count / 2;
host->boot_partitions = devm_kcalloc(dev, host->nr_boot_partitions,
sizeof(*host->boot_partitions), GFP_KERNEL);
if (!host->boot_partitions) {
host->nr_boot_partitions = 0;
return -ENOMEM;
}
for (i = 0, j = 0; i < host->nr_boot_partitions; i++, j += 2) {
boot_partition = &host->boot_partitions[i];
ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j,
&boot_partition->page_offset);
if (ret) {
dev_err(dev, "Error parsing boot partition offset at index %d\n", i);
host->nr_boot_partitions = 0;
return ret;
}
if (boot_partition->page_offset % mtd->writesize) {
dev_err(dev, "Boot partition offset not multiple of writesize at index %i\n",
i);
host->nr_boot_partitions = 0;
return -EINVAL;
}
/* Convert offset to nand pages */
boot_partition->page_offset /= mtd->writesize;
ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j + 1,
&boot_partition->page_size);
if (ret) {
dev_err(dev, "Error parsing boot partition size at index %d\n", i);
host->nr_boot_partitions = 0;
return ret;
}
if (boot_partition->page_size % mtd->writesize) {
dev_err(dev, "Boot partition size not multiple of writesize at index %i\n",
i);
host->nr_boot_partitions = 0;
return -EINVAL;
}
/* Convert size to nand pages */
boot_partition->page_size /= mtd->writesize;
}
return 0;
}
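/*
 * A hypothetical device tree fragment consumed by the parser above: pairs of
 * <offset size> in bytes, both multiples of the page size, e.g.
 *
 *   qcom,boot-partitions = <0x0 0x0e00000>;
 *
 * would describe a single boot region of 14 MiB starting at offset 0; the
 * values are converted to NAND pages before use.
 */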
static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
struct qcom_nand_host *host,
struct device_node *dn)
{
struct nand_chip *chip = &host->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
struct device *dev = nandc->dev;
int ret;
ret = of_property_read_u32(dn, "reg", &host->cs);
if (ret) {
dev_err(dev, "can't get chip-select\n");
return -ENXIO;
}
nand_set_flash_node(chip, dn);
mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
if (!mtd->name)
return -ENOMEM;
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
/*
* the bad block marker is readable only when we read the last codeword
* of a page with ECC disabled. currently, the nand_base and nand_bbt
* helpers don't allow us to read BB from a nand chip with ECC
* disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
* and block_markbad helpers until we permanently switch to using
* MTD_OPS_RAW for all drivers (with the help of badblockbits)
*/
chip->legacy.block_bad = qcom_nandc_block_bad;
chip->legacy.block_markbad = qcom_nandc_block_markbad;
chip->controller = &nandc->controller;
chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
NAND_SKIP_BBTSCAN;
/* set up initial status value */
host->status = NAND_STATUS_READY | NAND_STATUS_WP;
ret = nand_scan(chip, 1);
if (ret)
return ret;
ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
if (ret)
goto err;
if (nandc->props->use_codeword_fixup) {
ret = qcom_nand_host_parse_boot_partitions(nandc, host, dn);
if (ret)
goto err;
}
return 0;
err:
nand_cleanup(chip);
return ret;
}
static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
{
struct device *dev = nandc->dev;
struct device_node *dn = dev->of_node, *child;
struct qcom_nand_host *host;
int ret = -ENODEV;
for_each_available_child_of_node(dn, child) {
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
of_node_put(child);
return -ENOMEM;
}
ret = qcom_nand_host_init_and_register(nandc, host, child);
if (ret) {
devm_kfree(dev, host);
continue;
}
list_add_tail(&host->node, &nandc->host_list);
}
return ret;
}
/* parse custom DT properties here */
static int qcom_nandc_parse_dt(struct platform_device *pdev)
{
struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
struct device_node *np = nandc->dev->of_node;
int ret;
if (!nandc->props->is_bam) {
ret = of_property_read_u32(np, "qcom,cmd-crci",
&nandc->cmd_crci);
if (ret) {
dev_err(nandc->dev, "command CRCI unspecified\n");
return ret;
}
ret = of_property_read_u32(np, "qcom,data-crci",
&nandc->data_crci);
if (ret) {
dev_err(nandc->dev, "data CRCI unspecified\n");
return ret;
}
}
return 0;
}
static int qcom_nandc_probe(struct platform_device *pdev)
{
struct qcom_nand_controller *nandc;
const void *dev_data;
struct device *dev = &pdev->dev;
struct resource *res;
int ret;
nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
if (!nandc)
return -ENOMEM;
platform_set_drvdata(pdev, nandc);
nandc->dev = dev;
dev_data = of_device_get_match_data(dev);
if (!dev_data) {
dev_err(&pdev->dev, "failed to get device data\n");
return -ENODEV;
}
nandc->props = dev_data;
nandc->core_clk = devm_clk_get(dev, "core");
if (IS_ERR(nandc->core_clk))
return PTR_ERR(nandc->core_clk);
nandc->aon_clk = devm_clk_get(dev, "aon");
if (IS_ERR(nandc->aon_clk))
return PTR_ERR(nandc->aon_clk);
ret = qcom_nandc_parse_dt(pdev);
if (ret)
return ret;
nandc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(nandc->base))
return PTR_ERR(nandc->base);
nandc->base_phys = res->start;
nandc->base_dma = dma_map_resource(dev, res->start,
resource_size(res),
DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(dev, nandc->base_dma))
return -ENXIO;
ret = clk_prepare_enable(nandc->core_clk);
if (ret)
goto err_core_clk;
ret = clk_prepare_enable(nandc->aon_clk);
if (ret)
goto err_aon_clk;
ret = qcom_nandc_alloc(nandc);
if (ret)
goto err_nandc_alloc;
ret = qcom_nandc_setup(nandc);
if (ret)
goto err_setup;
ret = qcom_probe_nand_devices(nandc);
if (ret)
goto err_setup;
return 0;
err_setup:
qcom_nandc_unalloc(nandc);
err_nandc_alloc:
clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
clk_disable_unprepare(nandc->core_clk);
err_core_clk:
dma_unmap_resource(dev, res->start, resource_size(res),
DMA_BIDIRECTIONAL, 0);
return ret;
}
static void qcom_nandc_remove(struct platform_device *pdev)
{
struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct qcom_nand_host *host;
struct nand_chip *chip;
int ret;
list_for_each_entry(host, &nandc->host_list, node) {
chip = &host->chip;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
}
qcom_nandc_unalloc(nandc);
clk_disable_unprepare(nandc->aon_clk);
clk_disable_unprepare(nandc->core_clk);
dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
DMA_BIDIRECTIONAL, 0);
}
static const struct qcom_nandc_props ipq806x_nandc_props = {
.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
.is_bam = false,
.use_codeword_fixup = true,
.dev_cmd_reg_start = 0x0,
};
static const struct qcom_nandc_props ipq4019_nandc_props = {
.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
.is_bam = true,
.is_qpic = true,
.dev_cmd_reg_start = 0x0,
};
static const struct qcom_nandc_props ipq8074_nandc_props = {
.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
.is_bam = true,
.is_qpic = true,
.dev_cmd_reg_start = 0x7000,
};
static const struct qcom_nandc_props sdx55_nandc_props = {
.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
.is_bam = true,
.is_qpic = true,
.qpic_v2 = true,
.dev_cmd_reg_start = 0x7000,
};
/*
* data will hold a struct pointer containing more differences once we support
* more controller variants
*/
static const struct of_device_id qcom_nandc_of_match[] = {
{
.compatible = "qcom,ipq806x-nand",
.data = &ipq806x_nandc_props,
},
{
.compatible = "qcom,ipq4019-nand",
.data = &ipq4019_nandc_props,
},
{
.compatible = "qcom,ipq6018-nand",
.data = &ipq8074_nandc_props,
},
{
.compatible = "qcom,ipq8074-nand",
.data = &ipq8074_nandc_props,
},
{
.compatible = "qcom,sdx55-nand",
.data = &sdx55_nandc_props,
},
{}
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
static struct platform_driver qcom_nandc_driver = {
.driver = {
.name = "qcom-nandc",
.of_match_table = qcom_nandc_of_match,
},
.probe = qcom_nandc_probe,
.remove_new = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);
MODULE_AUTHOR("Archit Taneja <[email protected]>");
MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mtd/nand/raw/qcom_nandc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Stefan Agner <[email protected]>
* Copyright (C) 2014-2015 Lucas Stach <[email protected]>
* Copyright (C) 2012 Avionic Design GmbH
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <soc/tegra/common.h>
#define COMMAND 0x00
#define COMMAND_GO BIT(31)
#define COMMAND_CLE BIT(30)
#define COMMAND_ALE BIT(29)
#define COMMAND_PIO BIT(28)
#define COMMAND_TX BIT(27)
#define COMMAND_RX BIT(26)
#define COMMAND_SEC_CMD BIT(25)
#define COMMAND_AFT_DAT BIT(24)
#define COMMAND_TRANS_SIZE(size) ((((size) - 1) & 0xf) << 20)
#define COMMAND_A_VALID BIT(19)
#define COMMAND_B_VALID BIT(18)
#define COMMAND_RD_STATUS_CHK BIT(17)
#define COMMAND_RBSY_CHK BIT(16)
#define COMMAND_CE(x) BIT(8 + ((x) & 0x7))
#define COMMAND_CLE_SIZE(size) ((((size) - 1) & 0x3) << 4)
#define COMMAND_ALE_SIZE(size) ((((size) - 1) & 0xf) << 0)
#define STATUS 0x04
#define ISR 0x08
#define ISR_CORRFAIL_ERR BIT(24)
#define ISR_UND BIT(7)
#define ISR_OVR BIT(6)
#define ISR_CMD_DONE BIT(5)
#define ISR_ECC_ERR BIT(4)
#define IER 0x0c
#define IER_ERR_TRIG_VAL(x) (((x) & 0xf) << 16)
#define IER_UND BIT(7)
#define IER_OVR BIT(6)
#define IER_CMD_DONE BIT(5)
#define IER_ECC_ERR BIT(4)
#define IER_GIE BIT(0)
#define CONFIG 0x10
#define CONFIG_HW_ECC BIT(31)
#define CONFIG_ECC_SEL BIT(30)
#define CONFIG_ERR_COR BIT(29)
#define CONFIG_PIPE_EN BIT(28)
#define CONFIG_TVAL_4 (0 << 24)
#define CONFIG_TVAL_6 (1 << 24)
#define CONFIG_TVAL_8 (2 << 24)
#define CONFIG_SKIP_SPARE BIT(23)
#define CONFIG_BUS_WIDTH_16 BIT(21)
#define CONFIG_COM_BSY BIT(20)
#define CONFIG_PS_256 (0 << 16)
#define CONFIG_PS_512 (1 << 16)
#define CONFIG_PS_1024 (2 << 16)
#define CONFIG_PS_2048 (3 << 16)
#define CONFIG_PS_4096 (4 << 16)
#define CONFIG_SKIP_SPARE_SIZE_4 (0 << 14)
#define CONFIG_SKIP_SPARE_SIZE_8 (1 << 14)
#define CONFIG_SKIP_SPARE_SIZE_12 (2 << 14)
#define CONFIG_SKIP_SPARE_SIZE_16 (3 << 14)
#define CONFIG_TAG_BYTE_SIZE(x) ((x) & 0xff)
#define TIMING_1 0x14
#define TIMING_TRP_RESP(x) (((x) & 0xf) << 28)
#define TIMING_TWB(x) (((x) & 0xf) << 24)
#define TIMING_TCR_TAR_TRR(x) (((x) & 0xf) << 20)
#define TIMING_TWHR(x) (((x) & 0xf) << 16)
#define TIMING_TCS(x) (((x) & 0x3) << 14)
#define TIMING_TWH(x) (((x) & 0x3) << 12)
#define TIMING_TWP(x) (((x) & 0xf) << 8)
#define TIMING_TRH(x) (((x) & 0x3) << 4)
#define TIMING_TRP(x) (((x) & 0xf) << 0)
#define RESP 0x18
#define TIMING_2 0x1c
#define TIMING_TADL(x) ((x) & 0xf)
#define CMD_REG1 0x20
#define CMD_REG2 0x24
#define ADDR_REG1 0x28
#define ADDR_REG2 0x2c
#define DMA_MST_CTRL 0x30
#define DMA_MST_CTRL_GO BIT(31)
#define DMA_MST_CTRL_IN (0 << 30)
#define DMA_MST_CTRL_OUT BIT(30)
#define DMA_MST_CTRL_PERF_EN BIT(29)
#define DMA_MST_CTRL_IE_DONE BIT(28)
#define DMA_MST_CTRL_REUSE BIT(27)
#define DMA_MST_CTRL_BURST_1 (2 << 24)
#define DMA_MST_CTRL_BURST_4 (3 << 24)
#define DMA_MST_CTRL_BURST_8 (4 << 24)
#define DMA_MST_CTRL_BURST_16 (5 << 24)
#define DMA_MST_CTRL_IS_DONE BIT(20)
#define DMA_MST_CTRL_EN_A BIT(2)
#define DMA_MST_CTRL_EN_B BIT(1)
#define DMA_CFG_A 0x34
#define DMA_CFG_B 0x38
#define FIFO_CTRL 0x3c
#define FIFO_CTRL_CLR_ALL BIT(3)
#define DATA_PTR 0x40
#define TAG_PTR 0x44
#define ECC_PTR 0x48
#define DEC_STATUS 0x4c
#define DEC_STATUS_A_ECC_FAIL BIT(1)
#define DEC_STATUS_ERR_COUNT_MASK 0x00ff0000
#define DEC_STATUS_ERR_COUNT_SHIFT 16
#define HWSTATUS_CMD 0x50
#define HWSTATUS_MASK 0x54
#define HWSTATUS_RDSTATUS_MASK(x) (((x) & 0xff) << 24)
#define HWSTATUS_RDSTATUS_VALUE(x) (((x) & 0xff) << 16)
#define HWSTATUS_RBSY_MASK(x) (((x) & 0xff) << 8)
#define HWSTATUS_RBSY_VALUE(x) (((x) & 0xff) << 0)
#define BCH_CONFIG 0xcc
#define BCH_ENABLE BIT(0)
#define BCH_TVAL_4 (0 << 4)
#define BCH_TVAL_8 (1 << 4)
#define BCH_TVAL_14 (2 << 4)
#define BCH_TVAL_16 (3 << 4)
#define DEC_STAT_RESULT 0xd0
#define DEC_STAT_BUF 0xd4
#define DEC_STAT_BUF_FAIL_SEC_FLAG_MASK 0xff000000
#define DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT 24
#define DEC_STAT_BUF_CORR_SEC_FLAG_MASK 0x00ff0000
#define DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT 16
#define DEC_STAT_BUF_MAX_CORR_CNT_MASK 0x00001f00
#define DEC_STAT_BUF_MAX_CORR_CNT_SHIFT 8
#define OFFSET(val, off) ((val) < (off) ? 0 : (val) - (off))
#define SKIP_SPARE_BYTES 4
#define BITS_PER_STEP_RS 18
#define BITS_PER_STEP_BCH 13
#define INT_MASK (IER_UND | IER_OVR | IER_CMD_DONE | IER_GIE)
#define HWSTATUS_CMD_DEFAULT NAND_STATUS_READY
#define HWSTATUS_MASK_DEFAULT (HWSTATUS_RDSTATUS_MASK(1) | \
HWSTATUS_RDSTATUS_VALUE(0) | \
HWSTATUS_RBSY_MASK(NAND_STATUS_READY) | \
HWSTATUS_RBSY_VALUE(NAND_STATUS_READY))
struct tegra_nand_controller {
struct nand_controller controller;
struct device *dev;
void __iomem *regs;
int irq;
struct clk *clk;
struct completion command_complete;
struct completion dma_complete;
bool last_read_error;
int cur_cs;
struct nand_chip *chip;
};
struct tegra_nand_chip {
struct nand_chip chip;
struct gpio_desc *wp_gpio;
struct mtd_oob_region ecc;
u32 config;
u32 config_ecc;
u32 bch_config;
int cs[1];
};
static inline struct tegra_nand_controller *
to_tegra_ctrl(struct nand_controller *hw_ctrl)
{
return container_of(hw_ctrl, struct tegra_nand_controller, controller);
}
static inline struct tegra_nand_chip *to_tegra_chip(struct nand_chip *chip)
{
return container_of(chip, struct tegra_nand_chip, chip);
}
static int tegra_nand_ooblayout_rs_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_RS * chip->ecc.strength,
BITS_PER_BYTE);
if (section > 0)
return -ERANGE;
oobregion->offset = SKIP_SPARE_BYTES;
oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);
return 0;
}
static int tegra_nand_ooblayout_no_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
return -ERANGE;
}
static const struct mtd_ooblayout_ops tegra_nand_oob_rs_ops = {
.ecc = tegra_nand_ooblayout_rs_ecc,
.free = tegra_nand_ooblayout_no_free,
};
static int tegra_nand_ooblayout_bch_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_BCH * chip->ecc.strength,
BITS_PER_BYTE);
if (section > 0)
return -ERANGE;
oobregion->offset = SKIP_SPARE_BYTES;
oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);
return 0;
}
static const struct mtd_ooblayout_ops tegra_nand_oob_bch_ops = {
.ecc = tegra_nand_ooblayout_bch_ecc,
.free = tegra_nand_ooblayout_no_free,
};
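/*
 * Example of the ECC OOB sizes produced by the helpers above (a sketch,
 * assuming a 2K page, i.e. 4 steps of 512 bytes):
 *
 *   RS,  strength 6:  DIV_ROUND_UP(18 * 6, 8)  = 14 bytes/step -> 56 bytes
 *   BCH, strength 16: DIV_ROUND_UP(13 * 16, 8) = 26 bytes/step -> 104 bytes
 *
 * Both regions start at offset SKIP_SPARE_BYTES (4) and are rounded up to a
 * multiple of 4 bytes.
 */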
static irqreturn_t tegra_nand_irq(int irq, void *data)
{
struct tegra_nand_controller *ctrl = data;
u32 isr, dma;
isr = readl_relaxed(ctrl->regs + ISR);
dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
dev_dbg(ctrl->dev, "isr %08x\n", isr);
if (!isr && !(dma & DMA_MST_CTRL_IS_DONE))
return IRQ_NONE;
/*
	 * The bit name is somewhat misleading: This is also set when
* HW ECC was successful. The data sheet states:
* Correctable OR Un-correctable errors occurred in the DMA transfer...
*/
if (isr & ISR_CORRFAIL_ERR)
ctrl->last_read_error = true;
if (isr & ISR_CMD_DONE)
complete(&ctrl->command_complete);
if (isr & ISR_UND)
dev_err(ctrl->dev, "FIFO underrun\n");
if (isr & ISR_OVR)
dev_err(ctrl->dev, "FIFO overrun\n");
/* handle DMA interrupts */
if (dma & DMA_MST_CTRL_IS_DONE) {
writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
complete(&ctrl->dma_complete);
}
/* clear interrupts */
writel_relaxed(isr, ctrl->regs + ISR);
return IRQ_HANDLED;
}
static const char * const tegra_nand_reg_names[] = {
"COMMAND",
"STATUS",
"ISR",
"IER",
"CONFIG",
"TIMING",
NULL,
"TIMING2",
"CMD_REG1",
"CMD_REG2",
"ADDR_REG1",
"ADDR_REG2",
"DMA_MST_CTRL",
"DMA_CFG_A",
"DMA_CFG_B",
"FIFO_CTRL",
};
static void tegra_nand_dump_reg(struct tegra_nand_controller *ctrl)
{
u32 reg;
int i;
dev_err(ctrl->dev, "Tegra NAND controller register dump\n");
for (i = 0; i < ARRAY_SIZE(tegra_nand_reg_names); i++) {
const char *reg_name = tegra_nand_reg_names[i];
if (!reg_name)
continue;
reg = readl_relaxed(ctrl->regs + (i * 4));
dev_err(ctrl->dev, "%s: 0x%08x\n", reg_name, reg);
}
}
static void tegra_nand_controller_abort(struct tegra_nand_controller *ctrl)
{
u32 isr, dma;
disable_irq(ctrl->irq);
/* Abort current command/DMA operation */
writel_relaxed(0, ctrl->regs + DMA_MST_CTRL);
writel_relaxed(0, ctrl->regs + COMMAND);
/* clear interrupts */
isr = readl_relaxed(ctrl->regs + ISR);
writel_relaxed(isr, ctrl->regs + ISR);
dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
reinit_completion(&ctrl->command_complete);
reinit_completion(&ctrl->dma_complete);
enable_irq(ctrl->irq);
}
static int tegra_nand_cmd(struct nand_chip *chip,
const struct nand_subop *subop)
{
const struct nand_op_instr *instr;
const struct nand_op_instr *instr_data_in = NULL;
struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
unsigned int op_id, size = 0, offset = 0;
bool first_cmd = true;
u32 reg, cmd = 0;
int ret;
for (op_id = 0; op_id < subop->ninstrs; op_id++) {
unsigned int naddrs, i;
const u8 *addrs;
u32 addr1 = 0, addr2 = 0;
instr = &subop->instrs[op_id];
switch (instr->type) {
case NAND_OP_CMD_INSTR:
if (first_cmd) {
cmd |= COMMAND_CLE;
writel_relaxed(instr->ctx.cmd.opcode,
ctrl->regs + CMD_REG1);
} else {
cmd |= COMMAND_SEC_CMD;
writel_relaxed(instr->ctx.cmd.opcode,
ctrl->regs + CMD_REG2);
}
first_cmd = false;
break;
case NAND_OP_ADDR_INSTR:
offset = nand_subop_get_addr_start_off(subop, op_id);
naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
addrs = &instr->ctx.addr.addrs[offset];
cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(naddrs);
for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
addr1 |= *addrs++ << (BITS_PER_BYTE * i);
naddrs -= i;
for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
addr2 |= *addrs++ << (BITS_PER_BYTE * i);
writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
writel_relaxed(addr2, ctrl->regs + ADDR_REG2);
break;
case NAND_OP_DATA_IN_INSTR:
size = nand_subop_get_data_len(subop, op_id);
offset = nand_subop_get_data_start_off(subop, op_id);
cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
COMMAND_RX | COMMAND_A_VALID;
instr_data_in = instr;
break;
case NAND_OP_DATA_OUT_INSTR:
size = nand_subop_get_data_len(subop, op_id);
offset = nand_subop_get_data_start_off(subop, op_id);
cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
COMMAND_TX | COMMAND_A_VALID;
			memcpy(&reg, instr->ctx.data.buf.out + offset, size);
writel_relaxed(reg, ctrl->regs + RESP);
break;
case NAND_OP_WAITRDY_INSTR:
cmd |= COMMAND_RBSY_CHK;
break;
}
}
cmd |= COMMAND_GO | COMMAND_CE(ctrl->cur_cs);
writel_relaxed(cmd, ctrl->regs + COMMAND);
ret = wait_for_completion_timeout(&ctrl->command_complete,
msecs_to_jiffies(500));
if (!ret) {
dev_err(ctrl->dev, "COMMAND timeout\n");
tegra_nand_dump_reg(ctrl);
tegra_nand_controller_abort(ctrl);
return -ETIMEDOUT;
}
if (instr_data_in) {
reg = readl_relaxed(ctrl->regs + RESP);
		memcpy(instr_data_in->ctx.data.buf.in + offset, &reg, size);
}
return 0;
}
static const struct nand_op_parser tegra_nand_op_parser = NAND_OP_PARSER(
NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 4)),
NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4)),
);
static void tegra_nand_select_target(struct nand_chip *chip,
unsigned int die_nr)
{
struct tegra_nand_chip *nand = to_tegra_chip(chip);
struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
ctrl->cur_cs = nand->cs[die_nr];
}
static int tegra_nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
if (!check_only)
tegra_nand_select_target(chip, op->cs);
return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
check_only);
}
static void tegra_nand_hw_ecc(struct tegra_nand_controller *ctrl,
struct nand_chip *chip, bool enable)
{
struct tegra_nand_chip *nand = to_tegra_chip(chip);
if (chip->ecc.algo == NAND_ECC_ALGO_BCH && enable)
writel_relaxed(nand->bch_config, ctrl->regs + BCH_CONFIG);
else
writel_relaxed(0, ctrl->regs + BCH_CONFIG);
if (enable)
writel_relaxed(nand->config_ecc, ctrl->regs + CONFIG);
else
writel_relaxed(nand->config, ctrl->regs + CONFIG);
}
static int tegra_nand_page_xfer(struct mtd_info *mtd, struct nand_chip *chip,
void *buf, void *oob_buf, int oob_len, int page,
bool read)
{
struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
dma_addr_t dma_addr = 0, dma_addr_oob = 0;
u32 addr1, cmd, dma_ctrl;
int ret;
tegra_nand_select_target(chip, chip->cur_cs);
if (read) {
writel_relaxed(NAND_CMD_READ0, ctrl->regs + CMD_REG1);
writel_relaxed(NAND_CMD_READSTART, ctrl->regs + CMD_REG2);
} else {
writel_relaxed(NAND_CMD_SEQIN, ctrl->regs + CMD_REG1);
writel_relaxed(NAND_CMD_PAGEPROG, ctrl->regs + CMD_REG2);
}
cmd = COMMAND_CLE | COMMAND_SEC_CMD;
/* Lower 16-bits are column, by default 0 */
addr1 = page << 16;
if (!buf)
addr1 |= mtd->writesize;
writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
if (chip->options & NAND_ROW_ADDR_3) {
writel_relaxed(page >> 16, ctrl->regs + ADDR_REG2);
cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(5);
} else {
cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(4);
}
if (buf) {
dma_addr = dma_map_single(ctrl->dev, buf, mtd->writesize, dir);
ret = dma_mapping_error(ctrl->dev, dma_addr);
if (ret) {
dev_err(ctrl->dev, "dma mapping error\n");
return -EINVAL;
}
writel_relaxed(mtd->writesize - 1, ctrl->regs + DMA_CFG_A);
writel_relaxed(dma_addr, ctrl->regs + DATA_PTR);
}
if (oob_buf) {
dma_addr_oob = dma_map_single(ctrl->dev, oob_buf, mtd->oobsize,
dir);
ret = dma_mapping_error(ctrl->dev, dma_addr_oob);
if (ret) {
dev_err(ctrl->dev, "dma mapping error\n");
ret = -EINVAL;
goto err_unmap_dma_page;
}
writel_relaxed(oob_len - 1, ctrl->regs + DMA_CFG_B);
writel_relaxed(dma_addr_oob, ctrl->regs + TAG_PTR);
}
dma_ctrl = DMA_MST_CTRL_GO | DMA_MST_CTRL_PERF_EN |
DMA_MST_CTRL_IE_DONE | DMA_MST_CTRL_IS_DONE |
DMA_MST_CTRL_BURST_16;
if (buf)
dma_ctrl |= DMA_MST_CTRL_EN_A;
if (oob_buf)
dma_ctrl |= DMA_MST_CTRL_EN_B;
if (read)
dma_ctrl |= DMA_MST_CTRL_IN | DMA_MST_CTRL_REUSE;
else
dma_ctrl |= DMA_MST_CTRL_OUT;
writel_relaxed(dma_ctrl, ctrl->regs + DMA_MST_CTRL);
cmd |= COMMAND_GO | COMMAND_RBSY_CHK | COMMAND_TRANS_SIZE(9) |
COMMAND_CE(ctrl->cur_cs);
if (buf)
cmd |= COMMAND_A_VALID;
if (oob_buf)
cmd |= COMMAND_B_VALID;
if (read)
cmd |= COMMAND_RX;
else
cmd |= COMMAND_TX | COMMAND_AFT_DAT;
writel_relaxed(cmd, ctrl->regs + COMMAND);
ret = wait_for_completion_timeout(&ctrl->command_complete,
msecs_to_jiffies(500));
if (!ret) {
dev_err(ctrl->dev, "COMMAND timeout\n");
tegra_nand_dump_reg(ctrl);
tegra_nand_controller_abort(ctrl);
ret = -ETIMEDOUT;
goto err_unmap_dma;
}
ret = wait_for_completion_timeout(&ctrl->dma_complete,
msecs_to_jiffies(500));
if (!ret) {
dev_err(ctrl->dev, "DMA timeout\n");
tegra_nand_dump_reg(ctrl);
tegra_nand_controller_abort(ctrl);
ret = -ETIMEDOUT;
goto err_unmap_dma;
}
ret = 0;
err_unmap_dma:
if (oob_buf)
dma_unmap_single(ctrl->dev, dma_addr_oob, mtd->oobsize, dir);
err_unmap_dma_page:
if (buf)
dma_unmap_single(ctrl->dev, dma_addr, mtd->writesize, dir);
return ret;
}
static int tegra_nand_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
void *oob_buf = oob_required ? chip->oob_poi : NULL;
return tegra_nand_page_xfer(mtd, chip, buf, oob_buf,
mtd->oobsize, page, true);
}
static int tegra_nand_write_page_raw(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
void *oob_buf = oob_required ? chip->oob_poi : NULL;
return tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
mtd->oobsize, page, false);
}
static int tegra_nand_read_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
mtd->oobsize, page, true);
}
static int tegra_nand_write_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
mtd->oobsize, page, false);
}
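/*
 * Read a page with the HW ECC engine enabled. DEC_STAT_BUF is used to tell
 * corrected sectors from failed ones; when every sector failed, the OOB is
 * read back and nand_check_erased_ecc_chunk() decides whether the page is
 * simply erased or genuinely uncorrectable.
 */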
static int tegra_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
struct tegra_nand_chip *nand = to_tegra_chip(chip);
void *oob_buf = oob_required ? chip->oob_poi : NULL;
u32 dec_stat, max_corr_cnt;
unsigned long fail_sec_flag;
int ret;
tegra_nand_hw_ecc(ctrl, chip, true);
ret = tegra_nand_page_xfer(mtd, chip, buf, oob_buf, 0, page, true);
tegra_nand_hw_ecc(ctrl, chip, false);
if (ret)
return ret;
/* No correctable or un-correctable errors, page must have 0 bitflips */
if (!ctrl->last_read_error)
return 0;
/*
* Correctable or un-correctable errors occurred. Use DEC_STAT_BUF
* which contains information for all ECC selections.
*
* Note that since we do not use Command Queues DEC_RESULT does not
* state the number of pages we can read from the DEC_STAT_BUF. But
* since CORRFAIL_ERR did occur during page read we do have a valid
* result in DEC_STAT_BUF.
*/
ctrl->last_read_error = false;
dec_stat = readl_relaxed(ctrl->regs + DEC_STAT_BUF);
fail_sec_flag = (dec_stat & DEC_STAT_BUF_FAIL_SEC_FLAG_MASK) >>
DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT;
max_corr_cnt = (dec_stat & DEC_STAT_BUF_MAX_CORR_CNT_MASK) >>
DEC_STAT_BUF_MAX_CORR_CNT_SHIFT;
if (fail_sec_flag) {
int bit, max_bitflips = 0;
/*
* Since we do not support subpage writes, a complete page
* is either written or not. We can take a shortcut here by
* checking whether any of the sectors has been read
* successfully. If at least one sector has been read
* successfully, the page must have been written previously.
* It cannot be an erased page.
*
* E.g. controller might return fail_sec_flag with 0x4, which
* would mean only the third sector failed to correct. The
* page must have been written and the third sector is really
* not correctable anymore.
*/
if (fail_sec_flag ^ GENMASK(chip->ecc.steps - 1, 0)) {
mtd->ecc_stats.failed += hweight8(fail_sec_flag);
return max_corr_cnt;
}
/*
* All sectors failed to correct, but the ECC isn't smart
* enough to figure out if a page is really just erased.
* Read OOB data and check whether data/OOB is completely
* erased or if error correction just failed for all sub-
* pages.
*/
ret = tegra_nand_read_oob(chip, page);
if (ret < 0)
return ret;
for_each_set_bit(bit, &fail_sec_flag, chip->ecc.steps) {
u8 *data = buf + (chip->ecc.size * bit);
u8 *oob = chip->oob_poi + nand->ecc.offset +
(chip->ecc.bytes * bit);
ret = nand_check_erased_ecc_chunk(data, chip->ecc.size,
oob, chip->ecc.bytes,
NULL, 0,
chip->ecc.strength);
if (ret < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += ret;
max_bitflips = max(ret, max_bitflips);
}
}
return max_t(unsigned int, max_corr_cnt, max_bitflips);
} else {
int corr_sec_flag;
corr_sec_flag = (dec_stat & DEC_STAT_BUF_CORR_SEC_FLAG_MASK) >>
DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT;
/*
* The value returned in the register is the maximum of
* bitflips encountered in any of the ECC regions. As there is
* no way to get the number of bitflips in a specific region,
* we are not able to deliver correct stats but instead
* overestimate the number of corrected bitflips by assuming
* that all regions where errors have been corrected
* encountered the maximum number of bitflips.
*/
mtd->ecc_stats.corrected += max_corr_cnt * hweight8(corr_sec_flag);
return max_corr_cnt;
}
}
static int tegra_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
void *oob_buf = oob_required ? chip->oob_poi : NULL;
int ret;
tegra_nand_hw_ecc(ctrl, chip, true);
ret = tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
0, page, false);
tegra_nand_hw_ecc(ctrl, chip, false);
return ret;
}
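/*
 * Convert the SDR timing set into TIMING_1/TIMING_2 register values,
 * rounding each timing up to whole controller clock cycles.
 */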
static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl,
const struct nand_sdr_timings *timings)
{
/*
* The period (and all other timings in this function) is in ps,
* so we need to take care here to avoid integer overflows.
*/
unsigned int rate = clk_get_rate(ctrl->clk) / 1000000;
unsigned int period = DIV_ROUND_UP(1000000, rate);
u32 val, reg = 0;
val = DIV_ROUND_UP(max3(timings->tAR_min, timings->tRR_min,
timings->tRC_min), period);
reg |= TIMING_TCR_TAR_TRR(OFFSET(val, 3));
val = DIV_ROUND_UP(max(max(timings->tCS_min, timings->tCH_min),
max(timings->tALS_min, timings->tALH_min)),
period);
reg |= TIMING_TCS(OFFSET(val, 2));
val = DIV_ROUND_UP(max(timings->tRP_min, timings->tREA_max) + 6000,
period);
reg |= TIMING_TRP(OFFSET(val, 1)) | TIMING_TRP_RESP(OFFSET(val, 1));
reg |= TIMING_TWB(OFFSET(DIV_ROUND_UP(timings->tWB_max, period), 1));
reg |= TIMING_TWHR(OFFSET(DIV_ROUND_UP(timings->tWHR_min, period), 1));
reg |= TIMING_TWH(OFFSET(DIV_ROUND_UP(timings->tWH_min, period), 1));
reg |= TIMING_TWP(OFFSET(DIV_ROUND_UP(timings->tWP_min, period), 1));
reg |= TIMING_TRH(OFFSET(DIV_ROUND_UP(timings->tREH_min, period), 1));
writel_relaxed(reg, ctrl->regs + TIMING_1);
val = DIV_ROUND_UP(timings->tADL_min, period);
reg = TIMING_TADL(OFFSET(val, 3));
writel_relaxed(reg, ctrl->regs + TIMING_2);
}
static int tegra_nand_setup_interface(struct nand_chip *chip, int csline,
const struct nand_interface_config *conf)
{
struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
const struct nand_sdr_timings *timings;
timings = nand_get_sdr_timings(conf);
if (IS_ERR(timings))
return PTR_ERR(timings);
if (csline == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
tegra_nand_setup_timing(ctrl, timings);
return 0;
}
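/*
 * ECC strengths (bits per 512-byte step) selectable for the RS and BCH
 * engines; chips marked NAND_IS_BOOT_MEDIUM are limited to the *_bootable
 * subsets.
 */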
static const int rs_strength_bootable[] = { 4 };
static const int rs_strength[] = { 4, 6, 8 };
static const int bch_strength_bootable[] = { 8, 16 };
static const int bch_strength[] = { 4, 8, 14, 16 };
static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
int strength_len, int bits_per_step,
int oobsize)
{
struct nand_device *base = mtd_to_nanddev(nand_to_mtd(chip));
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(base);
bool maximize = base->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH;
int i;
/*
* Loop through available strengths. Backwards in case we try to
* maximize the BCH strength.
*/
for (i = 0; i < strength_len; i++) {
int strength_sel, bytes_per_step, bytes_per_page;
if (maximize) {
strength_sel = strength[strength_len - i - 1];
} else {
strength_sel = strength[i];
if (strength_sel < requirements->strength)
continue;
}
bytes_per_step = DIV_ROUND_UP(bits_per_step * strength_sel,
BITS_PER_BYTE);
bytes_per_page = round_up(bytes_per_step * chip->ecc.steps, 4);
/* Check whether strength fits OOB */
if (bytes_per_page < (oobsize - SKIP_SPARE_BYTES))
return strength_sel;
}
return -EINVAL;
}
static int tegra_nand_select_strength(struct nand_chip *chip, int oobsize)
{
const int *strength;
int strength_len, bits_per_step;
switch (chip->ecc.algo) {
case NAND_ECC_ALGO_RS:
bits_per_step = BITS_PER_STEP_RS;
if (chip->options & NAND_IS_BOOT_MEDIUM) {
strength = rs_strength_bootable;
strength_len = ARRAY_SIZE(rs_strength_bootable);
} else {
strength = rs_strength;
strength_len = ARRAY_SIZE(rs_strength);
}
break;
case NAND_ECC_ALGO_BCH:
bits_per_step = BITS_PER_STEP_BCH;
if (chip->options & NAND_IS_BOOT_MEDIUM) {
strength = bch_strength_bootable;
strength_len = ARRAY_SIZE(bch_strength_bootable);
} else {
strength = bch_strength;
strength_len = ARRAY_SIZE(bch_strength);
}
break;
default:
return -EINVAL;
}
return tegra_nand_get_strength(chip, strength, strength_len,
bits_per_step, oobsize);
}
static int tegra_nand_attach_chip(struct nand_chip *chip)
{
struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(&chip->base);
struct tegra_nand_chip *nand = to_tegra_chip(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
int bits_per_step;
int ret;
if (chip->bbt_options & NAND_BBT_USE_FLASH)
chip->bbt_options |= NAND_BBT_NO_OOB;
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.size = 512;
chip->ecc.steps = mtd->writesize / chip->ecc.size;
if (requirements->step_size != 512) {
dev_err(ctrl->dev, "Unsupported step size %d\n",
requirements->step_size);
return -EINVAL;
}
chip->ecc.read_page = tegra_nand_read_page_hwecc;
chip->ecc.write_page = tegra_nand_write_page_hwecc;
chip->ecc.read_page_raw = tegra_nand_read_page_raw;
chip->ecc.write_page_raw = tegra_nand_write_page_raw;
chip->ecc.read_oob = tegra_nand_read_oob;
chip->ecc.write_oob = tegra_nand_write_oob;
if (chip->options & NAND_BUSWIDTH_16)
nand->config |= CONFIG_BUS_WIDTH_16;
if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
if (mtd->writesize < 2048)
chip->ecc.algo = NAND_ECC_ALGO_RS;
else
chip->ecc.algo = NAND_ECC_ALGO_BCH;
}
if (chip->ecc.algo == NAND_ECC_ALGO_BCH && mtd->writesize < 2048) {
dev_err(ctrl->dev, "BCH supports 2K or 4K page size only\n");
return -EINVAL;
}
if (!chip->ecc.strength) {
ret = tegra_nand_select_strength(chip, mtd->oobsize);
if (ret < 0) {
dev_err(ctrl->dev,
"No valid strength found, minimum %d\n",
requirements->strength);
return ret;
}
chip->ecc.strength = ret;
}
nand->config_ecc = CONFIG_PIPE_EN | CONFIG_SKIP_SPARE |
CONFIG_SKIP_SPARE_SIZE_4;
switch (chip->ecc.algo) {
case NAND_ECC_ALGO_RS:
bits_per_step = BITS_PER_STEP_RS * chip->ecc.strength;
mtd_set_ooblayout(mtd, &tegra_nand_oob_rs_ops);
nand->config_ecc |= CONFIG_HW_ECC | CONFIG_ECC_SEL |
CONFIG_ERR_COR;
switch (chip->ecc.strength) {
case 4:
nand->config_ecc |= CONFIG_TVAL_4;
break;
case 6:
nand->config_ecc |= CONFIG_TVAL_6;
break;
case 8:
nand->config_ecc |= CONFIG_TVAL_8;
break;
default:
dev_err(ctrl->dev, "ECC strength %d not supported\n",
chip->ecc.strength);
return -EINVAL;
}
break;
case NAND_ECC_ALGO_BCH:
bits_per_step = BITS_PER_STEP_BCH * chip->ecc.strength;
mtd_set_ooblayout(mtd, &tegra_nand_oob_bch_ops);
nand->bch_config = BCH_ENABLE;
switch (chip->ecc.strength) {
case 4:
nand->bch_config |= BCH_TVAL_4;
break;
case 8:
nand->bch_config |= BCH_TVAL_8;
break;
case 14:
nand->bch_config |= BCH_TVAL_14;
break;
case 16:
nand->bch_config |= BCH_TVAL_16;
break;
default:
dev_err(ctrl->dev, "ECC strength %d not supported\n",
chip->ecc.strength);
return -EINVAL;
}
break;
default:
dev_err(ctrl->dev, "ECC algorithm not supported\n");
return -EINVAL;
}
dev_info(ctrl->dev, "Using %s with strength %d per 512 byte step\n",
chip->ecc.algo == NAND_ECC_ALGO_BCH ? "BCH" : "RS",
chip->ecc.strength);
chip->ecc.bytes = DIV_ROUND_UP(bits_per_step, BITS_PER_BYTE);
switch (mtd->writesize) {
case 256:
nand->config |= CONFIG_PS_256;
break;
case 512:
nand->config |= CONFIG_PS_512;
break;
case 1024:
nand->config |= CONFIG_PS_1024;
break;
case 2048:
nand->config |= CONFIG_PS_2048;
break;
case 4096:
nand->config |= CONFIG_PS_4096;
break;
default:
dev_err(ctrl->dev, "Unsupported writesize %d\n",
mtd->writesize);
return -ENODEV;
}
/* Store complete configuration for HW ECC in config_ecc */
nand->config_ecc |= nand->config;
/* Non-HW ECC read/writes complete OOB */
nand->config |= CONFIG_TAG_BYTE_SIZE(mtd->oobsize - 1);
writel_relaxed(nand->config, ctrl->regs + CONFIG);
return 0;
}
static const struct nand_controller_ops tegra_nand_controller_ops = {
.attach_chip = &tegra_nand_attach_chip,
.exec_op = tegra_nand_exec_op,
.setup_interface = tegra_nand_setup_interface,
};
static int tegra_nand_chips_init(struct device *dev,
struct tegra_nand_controller *ctrl)
{
struct device_node *np = dev->of_node;
struct device_node *np_nand;
int nsels, nchips = of_get_child_count(np);
struct tegra_nand_chip *nand;
struct mtd_info *mtd;
struct nand_chip *chip;
int ret;
u32 cs;
if (nchips != 1) {
dev_err(dev, "Currently only one NAND chip supported\n");
return -EINVAL;
}
np_nand = of_get_next_child(np, NULL);
nsels = of_property_count_elems_of_size(np_nand, "reg", sizeof(u32));
if (nsels != 1) {
dev_err(dev, "Missing/invalid reg property\n");
return -EINVAL;
}
/* Retrieve CS id, currently only single die NAND supported */
ret = of_property_read_u32(np_nand, "reg", &cs);
if (ret) {
dev_err(dev, "could not retrieve reg property: %d\n", ret);
return ret;
}
nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
if (!nand)
return -ENOMEM;
nand->cs[0] = cs;
nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
if (IS_ERR(nand->wp_gpio)) {
ret = PTR_ERR(nand->wp_gpio);
dev_err(dev, "Failed to request WP GPIO: %d\n", ret);
return ret;
}
chip = &nand->chip;
chip->controller = &ctrl->controller;
mtd = nand_to_mtd(chip);
mtd->dev.parent = dev;
mtd->owner = THIS_MODULE;
nand_set_flash_node(chip, np_nand);
if (!mtd->name)
mtd->name = "tegra_nand";
chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA;
ret = nand_scan(chip, 1);
if (ret)
return ret;
mtd_ooblayout_ecc(mtd, 0, &nand->ecc);
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "Failed to register mtd device: %d\n", ret);
nand_cleanup(chip);
return ret;
}
ctrl->chip = chip;
return 0;
}
static int tegra_nand_probe(struct platform_device *pdev)
{
struct reset_control *rst;
struct tegra_nand_controller *ctrl;
int err = 0;
ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return -ENOMEM;
ctrl->dev = &pdev->dev;
platform_set_drvdata(pdev, ctrl);
nand_controller_init(&ctrl->controller);
ctrl->controller.ops = &tegra_nand_controller_ops;
ctrl->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctrl->regs))
return PTR_ERR(ctrl->regs);
rst = devm_reset_control_get(&pdev->dev, "nand");
if (IS_ERR(rst))
return PTR_ERR(rst);
ctrl->clk = devm_clk_get(&pdev->dev, "nand");
if (IS_ERR(ctrl->clk))
return PTR_ERR(ctrl->clk);
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (err)
return err;
/*
* This driver doesn't support active power management yet,
* so we will simply keep device resumed.
*/
pm_runtime_enable(&pdev->dev);
err = pm_runtime_resume_and_get(&pdev->dev);
if (err)
goto err_dis_pm;
err = reset_control_reset(rst);
if (err) {
dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
goto err_put_pm;
}
writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
writel_relaxed(HWSTATUS_MASK_DEFAULT, ctrl->regs + HWSTATUS_MASK);
writel_relaxed(INT_MASK, ctrl->regs + IER);
init_completion(&ctrl->command_complete);
init_completion(&ctrl->dma_complete);
ctrl->irq = platform_get_irq(pdev, 0);
err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
dev_name(&pdev->dev), ctrl);
if (err) {
dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err);
goto err_put_pm;
}
writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);
err = tegra_nand_chips_init(ctrl->dev, ctrl);
if (err)
goto err_put_pm;
return 0;
err_put_pm:
pm_runtime_put_sync_suspend(ctrl->dev);
pm_runtime_force_suspend(ctrl->dev);
err_dis_pm:
pm_runtime_disable(&pdev->dev);
return err;
}
static void tegra_nand_remove(struct platform_device *pdev)
{
struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
struct nand_chip *chip = ctrl->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
WARN_ON(mtd_device_unregister(mtd));
nand_cleanup(chip);
pm_runtime_put_sync_suspend(ctrl->dev);
pm_runtime_force_suspend(ctrl->dev);
}
static int __maybe_unused tegra_nand_runtime_resume(struct device *dev)
{
struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);
int err;
err = clk_prepare_enable(ctrl->clk);
if (err) {
dev_err(dev, "Failed to enable clock: %d\n", err);
return err;
}
return 0;
}
static int __maybe_unused tegra_nand_runtime_suspend(struct device *dev)
{
struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);
clk_disable_unprepare(ctrl->clk);
return 0;
}
static const struct dev_pm_ops tegra_nand_pm = {
SET_RUNTIME_PM_OPS(tegra_nand_runtime_suspend, tegra_nand_runtime_resume,
NULL)
};
static const struct of_device_id tegra_nand_of_match[] = {
{ .compatible = "nvidia,tegra20-nand" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra_nand_of_match);
static struct platform_driver tegra_nand_driver = {
.driver = {
.name = "tegra-nand",
.of_match_table = tegra_nand_of_match,
.pm = &tegra_nand_pm,
},
.probe = tegra_nand_probe,
.remove_new = tegra_nand_remove,
};
module_platform_driver(tegra_nand_driver);
MODULE_DESCRIPTION("NVIDIA Tegra NAND driver");
MODULE_AUTHOR("Thierry Reding <[email protected]>");
MODULE_AUTHOR("Lucas Stach <[email protected]>");
MODULE_AUTHOR("Stefan Agner <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mtd/nand/raw/tegra_nand.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2013 Boris BREZILLON <[email protected]>
*
* Derived from:
* https://github.com/yuq/sunxi-nfc-mtd
* Copyright (C) 2013 Qiang Yu <[email protected]>
*
* https://github.com/hno/Allwinner-Info
* Copyright (C) 2013 Henrik Nordström <Henrik Nordström>
*
* Copyright (C) 2013 Dmitriy B. <[email protected]>
* Copyright (C) 2013 Sergey Lapin <[email protected]>
*/
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/reset.h>
#define NFC_REG_CTL 0x0000
#define NFC_REG_ST 0x0004
#define NFC_REG_INT 0x0008
#define NFC_REG_TIMING_CTL 0x000C
#define NFC_REG_TIMING_CFG 0x0010
#define NFC_REG_ADDR_LOW 0x0014
#define NFC_REG_ADDR_HIGH 0x0018
#define NFC_REG_SECTOR_NUM 0x001C
#define NFC_REG_CNT 0x0020
#define NFC_REG_CMD 0x0024
#define NFC_REG_RCMD_SET 0x0028
#define NFC_REG_WCMD_SET 0x002C
#define NFC_REG_A10_IO_DATA 0x0030
#define NFC_REG_A23_IO_DATA 0x0300
#define NFC_REG_ECC_CTL 0x0034
#define NFC_REG_ECC_ST 0x0038
#define NFC_REG_DEBUG 0x003C
#define NFC_REG_ECC_ERR_CNT(x) ((0x0040 + (x)) & ~0x3)
#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4))
#define NFC_REG_SPARE_AREA 0x00A0
#define NFC_REG_PAT_ID 0x00A4
#define NFC_REG_MDMA_ADDR 0x00C0
#define NFC_REG_MDMA_CNT 0x00C4
#define NFC_RAM0_BASE 0x0400
#define NFC_RAM1_BASE 0x0800
/* define bit use in NFC_CTL */
#define NFC_EN BIT(0)
#define NFC_RESET BIT(1)
#define NFC_BUS_WIDTH_MSK BIT(2)
#define NFC_BUS_WIDTH_8 (0 << 2)
#define NFC_BUS_WIDTH_16 (1 << 2)
#define NFC_RB_SEL_MSK BIT(3)
#define NFC_RB_SEL(x) ((x) << 3)
#define NFC_CE_SEL_MSK GENMASK(26, 24)
#define NFC_CE_SEL(x) ((x) << 24)
#define NFC_CE_CTL BIT(6)
#define NFC_PAGE_SHIFT_MSK GENMASK(11, 8)
#define NFC_PAGE_SHIFT(x) (((x) < 10 ? 0 : (x) - 10) << 8)
#define NFC_SAM BIT(12)
#define NFC_RAM_METHOD BIT(14)
#define NFC_DMA_TYPE_NORMAL BIT(15)
#define NFC_DEBUG_CTL BIT(31)
/* define bit use in NFC_ST */
#define NFC_RB_B2R BIT(0)
#define NFC_CMD_INT_FLAG BIT(1)
#define NFC_DMA_INT_FLAG BIT(2)
#define NFC_CMD_FIFO_STATUS BIT(3)
#define NFC_STA BIT(4)
#define NFC_NATCH_INT_FLAG BIT(5)
#define NFC_RB_STATE(x) BIT(x + 8)
/* define bit use in NFC_INT */
#define NFC_B2R_INT_ENABLE BIT(0)
#define NFC_CMD_INT_ENABLE BIT(1)
#define NFC_DMA_INT_ENABLE BIT(2)
#define NFC_INT_MASK (NFC_B2R_INT_ENABLE | \
NFC_CMD_INT_ENABLE | \
NFC_DMA_INT_ENABLE)
/* define bit use in NFC_TIMING_CTL */
#define NFC_TIMING_CTL_EDO BIT(8)
/* define NFC_TIMING_CFG register layout */
#define NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD) \
(((tWB) & 0x3) | (((tADL) & 0x3) << 2) | \
(((tWHR) & 0x3) << 4) | (((tRHW) & 0x3) << 6) | \
(((tCAD) & 0x7) << 8))
/* define bit use in NFC_CMD */
#define NFC_CMD_LOW_BYTE_MSK GENMASK(7, 0)
#define NFC_CMD_HIGH_BYTE_MSK GENMASK(15, 8)
#define NFC_CMD(x) (x)
#define NFC_ADR_NUM_MSK GENMASK(18, 16)
#define NFC_ADR_NUM(x) (((x) - 1) << 16)
#define NFC_SEND_ADR BIT(19)
#define NFC_ACCESS_DIR BIT(20)
#define NFC_DATA_TRANS BIT(21)
#define NFC_SEND_CMD1 BIT(22)
#define NFC_WAIT_FLAG BIT(23)
#define NFC_SEND_CMD2 BIT(24)
#define NFC_SEQ BIT(25)
#define NFC_DATA_SWAP_METHOD BIT(26)
#define NFC_ROW_AUTO_INC BIT(27)
#define NFC_SEND_CMD3 BIT(28)
#define NFC_SEND_CMD4 BIT(29)
#define NFC_CMD_TYPE_MSK GENMASK(31, 30)
#define NFC_NORMAL_OP (0 << 30)
#define NFC_ECC_OP (1 << 30)
#define NFC_PAGE_OP (2U << 30)
/* define bit use in NFC_RCMD_SET */
#define NFC_READ_CMD_MSK GENMASK(7, 0)
#define NFC_RND_READ_CMD0_MSK GENMASK(15, 8)
#define NFC_RND_READ_CMD1_MSK GENMASK(23, 16)
/* define bit use in NFC_WCMD_SET */
#define NFC_PROGRAM_CMD_MSK GENMASK(7, 0)
#define NFC_RND_WRITE_CMD_MSK GENMASK(15, 8)
#define NFC_READ_CMD0_MSK GENMASK(23, 16)
#define NFC_READ_CMD1_MSK GENMASK(31, 24)
/* define bit use in NFC_ECC_CTL */
#define NFC_ECC_EN BIT(0)
#define NFC_ECC_PIPELINE BIT(3)
#define NFC_ECC_EXCEPTION BIT(4)
#define NFC_ECC_BLOCK_SIZE_MSK BIT(5)
#define NFC_ECC_BLOCK_512 BIT(5)
#define NFC_RANDOM_EN BIT(9)
#define NFC_RANDOM_DIRECTION BIT(10)
#define NFC_ECC_MODE_MSK GENMASK(15, 12)
#define NFC_ECC_MODE(x) ((x) << 12)
#define NFC_RANDOM_SEED_MSK GENMASK(30, 16)
#define NFC_RANDOM_SEED(x) ((x) << 16)
/* define bit use in NFC_ECC_ST */
#define NFC_ECC_ERR(x) BIT(x)
#define NFC_ECC_ERR_MSK GENMASK(15, 0)
#define NFC_ECC_PAT_FOUND(x) BIT(x + 16)
#define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff)
#define NFC_DEFAULT_TIMEOUT_MS 1000
#define NFC_SRAM_SIZE 1024
#define NFC_MAX_CS 7
/**
* struct sunxi_nand_chip_sel - stores information related to NAND Chip Select
*
* @cs: the NAND CS id used to communicate with a NAND Chip
* @rb: the Ready/Busy pin ID. -1 means no R/B pin connected to the NFC
*/
struct sunxi_nand_chip_sel {
u8 cs;
s8 rb;
};
/**
* struct sunxi_nand_hw_ecc - stores information related to HW ECC support
*
* @ecc_ctl: ECC_CTL register value for this NAND chip
*/
struct sunxi_nand_hw_ecc {
u32 ecc_ctl;
};
/**
* struct sunxi_nand_chip - stores NAND chip device related information
*
* @node: used to store NAND chips into a list
* @nand: base NAND chip structure
* @ecc: ECC controller structure
* @clk_rate: clk_rate required for this NAND chip
* @timing_cfg: TIMING_CFG register value for this NAND chip
* @timing_ctl: TIMING_CTL register value for this NAND chip
* @nsels: number of CS lines required by the NAND chip
* @sels: array of CS lines descriptions
*/
struct sunxi_nand_chip {
struct list_head node;
struct nand_chip nand;
struct sunxi_nand_hw_ecc ecc;
unsigned long clk_rate;
u32 timing_cfg;
u32 timing_ctl;
int nsels;
struct sunxi_nand_chip_sel sels[];
};
static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
{
return container_of(nand, struct sunxi_nand_chip, nand);
}
/*
* NAND Controller capabilities structure: stores NAND controller capabilities
* for distinction between compatible strings.
*
* @has_mdma: Use mbus dma mode, otherwise general dma
* through MBUS on A23/A33 needs extra configuration.
* @reg_io_data: I/O data register
* @dma_maxburst: DMA maxburst
*/
struct sunxi_nfc_caps {
bool has_mdma;
unsigned int reg_io_data;
unsigned int dma_maxburst;
};
/**
* struct sunxi_nfc - stores sunxi NAND controller information
*
* @controller: base controller structure
* @dev: parent device (used to print error messages)
* @regs: NAND controller registers
* @ahb_clk: NAND controller AHB clock
* @mod_clk: NAND controller mod clock
* @reset: NAND controller reset line
* @assigned_cs: bitmask describing already assigned CS lines
* @clk_rate: NAND controller current clock rate
* @chips: a list containing all the NAND chips attached to this NAND
* controller
* @complete: a completion object used to wait for NAND controller events
* @dmac: the DMA channel attached to the NAND controller
* @caps: NAND Controller capabilities
*/
struct sunxi_nfc {
struct nand_controller controller;
struct device *dev;
void __iomem *regs;
struct clk *ahb_clk;
struct clk *mod_clk;
struct reset_control *reset;
unsigned long assigned_cs;
unsigned long clk_rate;
struct list_head chips;
struct completion complete;
struct dma_chan *dmac;
const struct sunxi_nfc_caps *caps;
};
static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_controller *ctrl)
{
return container_of(ctrl, struct sunxi_nfc, controller);
}
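/*
 * Interrupt handler: clear the status bits that fired and mask them in
 * NFC_REG_INT; the completion is only signalled once every currently
 * enabled event has triggered.
 */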
static irqreturn_t sunxi_nfc_interrupt(int irq, void *dev_id)
{
struct sunxi_nfc *nfc = dev_id;
u32 st = readl(nfc->regs + NFC_REG_ST);
u32 ien = readl(nfc->regs + NFC_REG_INT);
if (!(ien & st))
return IRQ_NONE;
if ((ien & st) == ien)
complete(&nfc->complete);
writel(st & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
writel(~st & ien & NFC_INT_MASK, nfc->regs + NFC_REG_INT);
return IRQ_HANDLED;
}
static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
bool use_polling, unsigned int timeout_ms)
{
int ret;
if (events & ~NFC_INT_MASK)
return -EINVAL;
if (!timeout_ms)
timeout_ms = NFC_DEFAULT_TIMEOUT_MS;
if (!use_polling) {
init_completion(&nfc->complete);
writel(events, nfc->regs + NFC_REG_INT);
ret = wait_for_completion_timeout(&nfc->complete,
msecs_to_jiffies(timeout_ms));
if (!ret)
ret = -ETIMEDOUT;
else
ret = 0;
writel(0, nfc->regs + NFC_REG_INT);
} else {
u32 status;
ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
(status & events) == events, 1,
timeout_ms * 1000);
}
writel(events & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
if (ret)
dev_err(nfc->dev, "wait interrupt timedout\n");
return ret;
}
static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc)
{
u32 status;
int ret;
ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
!(status & NFC_CMD_FIFO_STATUS), 1,
NFC_DEFAULT_TIMEOUT_MS * 1000);
if (ret)
dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n");
return ret;
}
static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
{
u32 ctl;
int ret;
writel(0, nfc->regs + NFC_REG_ECC_CTL);
writel(NFC_RESET, nfc->regs + NFC_REG_CTL);
ret = readl_poll_timeout(nfc->regs + NFC_REG_CTL, ctl,
!(ctl & NFC_RESET), 1,
NFC_DEFAULT_TIMEOUT_MS * 1000);
if (ret)
dev_err(nfc->dev, "wait for NAND controller reset timedout\n");
return ret;
}
static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf,
int chunksize, int nchunks,
enum dma_data_direction ddir,
struct scatterlist *sg)
{
struct dma_async_tx_descriptor *dmad;
enum dma_transfer_direction tdir;
dma_cookie_t dmat;
int ret;
if (ddir == DMA_FROM_DEVICE)
tdir = DMA_DEV_TO_MEM;
else
tdir = DMA_MEM_TO_DEV;
sg_init_one(sg, buf, nchunks * chunksize);
ret = dma_map_sg(nfc->dev, sg, 1, ddir);
if (!ret)
return -ENOMEM;
if (!nfc->caps->has_mdma) {
dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK);
if (!dmad) {
ret = -EINVAL;
goto err_unmap_buf;
}
}
writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
nfc->regs + NFC_REG_CTL);
writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
writel(chunksize, nfc->regs + NFC_REG_CNT);
if (nfc->caps->has_mdma) {
writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_DMA_TYPE_NORMAL,
nfc->regs + NFC_REG_CTL);
writel(chunksize * nchunks, nfc->regs + NFC_REG_MDMA_CNT);
writel(sg_dma_address(sg), nfc->regs + NFC_REG_MDMA_ADDR);
} else {
dmat = dmaengine_submit(dmad);
ret = dma_submit_error(dmat);
if (ret)
goto err_clr_dma_flag;
}
return 0;
err_clr_dma_flag:
writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
nfc->regs + NFC_REG_CTL);
err_unmap_buf:
dma_unmap_sg(nfc->dev, sg, 1, ddir);
return ret;
}
static void sunxi_nfc_dma_op_cleanup(struct sunxi_nfc *nfc,
enum dma_data_direction ddir,
struct scatterlist *sg)
{
dma_unmap_sg(nfc->dev, sg, 1, ddir);
writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
nfc->regs + NFC_REG_CTL);
}
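/*
 * Apply the per-chip settings for the given CS: CE/RB selection and page
 * shift in NFC_CTL, spare area offset, mod clock rate and the chip's
 * timing registers.
 */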
static void sunxi_nfc_select_chip(struct nand_chip *nand, unsigned int cs)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
struct sunxi_nand_chip_sel *sel;
u32 ctl;
if (cs >= sunxi_nand->nsels)
return;
ctl = readl(nfc->regs + NFC_REG_CTL) &
~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN);
sel = &sunxi_nand->sels[cs];
ctl |= NFC_CE_SEL(sel->cs) | NFC_EN | NFC_PAGE_SHIFT(nand->page_shift);
if (sel->rb >= 0)
ctl |= NFC_RB_SEL(sel->rb);
writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
if (nfc->clk_rate != sunxi_nand->clk_rate) {
clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
nfc->clk_rate = sunxi_nand->clk_rate;
}
writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL);
writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG);
writel(ctl, nfc->regs + NFC_REG_CTL);
}
static void sunxi_nfc_read_buf(struct nand_chip *nand, uint8_t *buf, int len)
{
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
int ret;
int cnt;
int offs = 0;
u32 tmp;
while (len > offs) {
bool poll = false;
cnt = min(len - offs, NFC_SRAM_SIZE);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
break;
writel(cnt, nfc->regs + NFC_REG_CNT);
tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
writel(tmp, nfc->regs + NFC_REG_CMD);
/* Arbitrary limit for polling mode */
if (cnt < 64)
poll = true;
ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
if (ret)
break;
if (buf)
memcpy_fromio(buf + offs, nfc->regs + NFC_RAM0_BASE,
cnt);
offs += cnt;
}
}
static void sunxi_nfc_write_buf(struct nand_chip *nand, const uint8_t *buf,
int len)
{
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
int ret;
int cnt;
int offs = 0;
u32 tmp;
while (len > offs) {
bool poll = false;
cnt = min(len - offs, NFC_SRAM_SIZE);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
break;
writel(cnt, nfc->regs + NFC_REG_CNT);
memcpy_toio(nfc->regs + NFC_RAM0_BASE, buf + offs, cnt);
tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
NFC_ACCESS_DIR;
writel(tmp, nfc->regs + NFC_REG_CMD);
/* Arbitrary limit for polling mode */
if (cnt < 64)
poll = true;
ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, poll, 0);
if (ret)
break;
offs += cnt;
}
}
/* These seed values have been extracted from Allwinner's BSP */
static const u16 sunxi_nfc_randomizer_page_seeds[] = {
0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
};
/*
* sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds
* have been generated using
* sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what
* the randomizer engine does internally before de/scrambling OOB data.
*
* Those tables are statically defined to avoid calculating randomizer state
* at runtime.
*/
static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = {
0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64,
0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409,
0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617,
0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d,
0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91,
0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d,
0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab,
0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8,
0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8,
0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b,
0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5,
0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a,
0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891,
0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36,
0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd,
0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0,
};
static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = {
0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6,
0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982,
0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9,
0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07,
0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e,
0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2,
0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c,
0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f,
0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc,
0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e,
0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8,
0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68,
0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d,
0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179,
0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601,
0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd,
};
static u16 sunxi_nfc_randomizer_step(u16 state, int count)
{
state &= 0x7fff;
/*
* This loop is just a simple implementation of a Fibonacci LFSR using
* the x16 + x15 + 1 polynomial.
*/
while (count--)
state = ((state >> 1) |
(((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;
return state;
}
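/*
 * Pick the randomizer seed for a page: the page number modulo the number of
 * pages per eraseblock (capped at the seed table size) indexes one of the
 * seed tables; the dedicated ecc512/ecc1024 tables are used when the seed
 * applies to OOB scrambling.
 */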
static u16 sunxi_nfc_randomizer_state(struct nand_chip *nand, int page,
bool ecc)
{
struct mtd_info *mtd = nand_to_mtd(nand);
const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
int mod = mtd_div_by_ws(mtd->erasesize, mtd);
if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds))
mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds);
if (ecc) {
if (mtd->ecc_step_size == 512)
seeds = sunxi_nfc_randomizer_ecc512_seeds;
else
seeds = sunxi_nfc_randomizer_ecc1024_seeds;
}
return seeds[page % mod];
}
static void sunxi_nfc_randomizer_config(struct nand_chip *nand, int page,
bool ecc)
{
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
u32 ecc_ctl;
u16 state;
if (!(nand->options & NAND_NEED_SCRAMBLING))
return;
state = sunxi_nfc_randomizer_state(nand, page, ecc);
ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
}
static void sunxi_nfc_randomizer_enable(struct nand_chip *nand)
{
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
if (!(nand->options & NAND_NEED_SCRAMBLING))
return;
writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
nfc->regs + NFC_REG_ECC_CTL);
}
static void sunxi_nfc_randomizer_disable(struct nand_chip *nand)
{
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
if (!(nand->options & NAND_NEED_SCRAMBLING))
return;
writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
nfc->regs + NFC_REG_ECC_CTL);
}
static void sunxi_nfc_randomize_bbm(struct nand_chip *nand, int page, u8 *bbm)
{
u16 state = sunxi_nfc_randomizer_state(nand, page, true);
bbm[0] ^= state;
bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
}
static void sunxi_nfc_randomizer_write_buf(struct nand_chip *nand,
const uint8_t *buf, int len,
bool ecc, int page)
{
sunxi_nfc_randomizer_config(nand, page, ecc);
sunxi_nfc_randomizer_enable(nand);
sunxi_nfc_write_buf(nand, buf, len);
sunxi_nfc_randomizer_disable(nand);
}
static void sunxi_nfc_randomizer_read_buf(struct nand_chip *nand, uint8_t *buf,
int len, bool ecc, int page)
{
sunxi_nfc_randomizer_config(nand, page, ecc);
sunxi_nfc_randomizer_enable(nand);
sunxi_nfc_read_buf(nand, buf, len);
sunxi_nfc_randomizer_disable(nand);
}
static void sunxi_nfc_hw_ecc_enable(struct nand_chip *nand)
{
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
writel(sunxi_nand->ecc.ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
}
static void sunxi_nfc_hw_ecc_disable(struct nand_chip *nand)
{
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
writel(0, nfc->regs + NFC_REG_ECC_CTL);
}
static inline void sunxi_nfc_user_data_to_buf(u32 user_data, u8 *buf)
{
buf[0] = user_data;
buf[1] = user_data >> 8;
buf[2] = user_data >> 16;
buf[3] = user_data >> 24;
}
static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
{
return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
}
static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct nand_chip *nand, u8 *oob,
int step, bool bbm, int page)
{
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)),
oob);
/* De-randomize the Bad Block Marker. */
if (bbm && (nand->options & NAND_NEED_SCRAMBLING))
sunxi_nfc_randomize_bbm(nand, page, oob);
}
static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct nand_chip *nand,
const u8 *oob, int step,
bool bbm, int page)
{
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
u8 user_data[4];
/* Randomize the Bad Block Marker. */
if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) {
memcpy(user_data, oob, sizeof(user_data));
sunxi_nfc_randomize_bbm(nand, page, user_data);
oob = user_data;
}
writel(sunxi_nfc_buf_to_user_data(oob),
nfc->regs + NFC_REG_USER_DATA(step));
}
static void sunxi_nfc_hw_ecc_update_stats(struct nand_chip *nand,
unsigned int *max_bitflips, int ret)
{
struct mtd_info *mtd = nand_to_mtd(nand);
if (ret < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += ret;
*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
}
}
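/*
 * Interpret the ECC status for one chunk. Returns -EBADMSG if the engine
 * flagged the chunk as uncorrectable, 0 when the controller recognised a
 * blank pattern (data/OOB are then filled with that pattern and *erased is
 * set for the all-0xff case), or the number of corrected bitflips otherwise.
 */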
static int sunxi_nfc_hw_ecc_correct(struct nand_chip *nand, u8 *data, u8 *oob,
int step, u32 status, bool *erased)
{
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
u32 tmp;
*erased = false;
if (status & NFC_ECC_ERR(step))
return -EBADMSG;
if (status & NFC_ECC_PAT_FOUND(step)) {
u8 pattern;
if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) {
pattern = 0x0;
} else {
pattern = 0xff;
*erased = true;
}
if (data)
memset(data, pattern, ecc->size);
if (oob)
memset(oob, pattern, ecc->bytes + 4);
return 0;
}
tmp = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(step));
return NFC_ECC_ERR_CNT(step, tmp);
}
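/*
 * Read and correct one ECC chunk in PIO mode. Returns a negative error code
 * on failure, 1 when the chunk had to be handled as a blank/raw chunk, and
 * 0 otherwise; bitflip statistics are accumulated through
 * sunxi_nfc_hw_ecc_update_stats().
 */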
static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
u8 *data, int data_off,
u8 *oob, int oob_off,
int *cur_off,
unsigned int *max_bitflips,
bool bbm, bool oob_required, int page)
{
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
int raw_mode = 0;
bool erased;
int ret;
if (*cur_off != data_off)
nand_change_read_column_op(nand, data_off, NULL, 0, false);
sunxi_nfc_randomizer_read_buf(nand, NULL, ecc->size, false, page);
if (data_off + ecc->size != oob_off)
nand_change_read_column_op(nand, oob_off, NULL, 0, false);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
return ret;
sunxi_nfc_randomizer_enable(nand);
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
sunxi_nfc_randomizer_disable(nand);
if (ret)
return ret;
*cur_off = oob_off + ecc->bytes + 4;
ret = sunxi_nfc_hw_ecc_correct(nand, data, oob_required ? oob : NULL, 0,
readl(nfc->regs + NFC_REG_ECC_ST),
&erased);
if (erased)
return 1;
if (ret < 0) {
/*
* Re-read the data with the randomizer disabled to identify
* bitflips in erased pages.
*/
if (nand->options & NAND_NEED_SCRAMBLING)
nand_change_read_column_op(nand, data_off, data,
ecc->size, false);
else
memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
ecc->size);
nand_change_read_column_op(nand, oob_off, oob, ecc->bytes + 4,
false);
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 4,
NULL, 0, ecc->strength);
if (ret >= 0)
raw_mode = 1;
} else {
memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
if (oob_required) {
nand_change_read_column_op(nand, oob_off, NULL, 0,
false);
sunxi_nfc_randomizer_read_buf(nand, oob, ecc->bytes + 4,
true, page);
sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, 0,
bbm, page);
}
}
sunxi_nfc_hw_ecc_update_stats(nand, max_bitflips, ret);
return raw_mode;
}
static void sunxi_nfc_hw_ecc_read_extra_oob(struct nand_chip *nand,
u8 *oob, int *cur_off,
bool randomize, int page)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_ecc_ctrl *ecc = &nand->ecc;
int offset = ((ecc->bytes + 4) * ecc->steps);
int len = mtd->oobsize - offset;
if (len <= 0)
return;
if (!cur_off || *cur_off != offset)
nand_change_read_column_op(nand, mtd->writesize, NULL, 0,
false);
if (!randomize)
sunxi_nfc_read_buf(nand, oob + offset, len);
else
sunxi_nfc_randomizer_read_buf(nand, oob + offset, len,
false, page);
if (cur_off)
*cur_off = mtd->oobsize + mtd->writesize;
}
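/*
 * DMA read path: the whole data area is transferred in one go, then the
 * ECC status is evaluated per chunk. Chunks that failed correction are
 * re-read in PIO mode so that erased pages can be told apart from genuine
 * uncorrectable errors.
 */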
static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf,
int oob_required, int page,
int nchunks)
{
bool randomized = nand->options & NAND_NEED_SCRAMBLING;
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_ecc_ctrl *ecc = &nand->ecc;
unsigned int max_bitflips = 0;
int ret, i, raw_mode = 0;
struct scatterlist sg;
u32 status, wait;
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
return ret;
ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, nchunks,
DMA_FROM_DEVICE, &sg);
if (ret)
return ret;
sunxi_nfc_hw_ecc_enable(nand);
sunxi_nfc_randomizer_config(nand, page, false);
sunxi_nfc_randomizer_enable(nand);
writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);
wait = NFC_CMD_INT_FLAG;
if (nfc->caps->has_mdma)
wait |= NFC_DMA_INT_FLAG;
else
dma_async_issue_pending(nfc->dmac);
writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
nfc->regs + NFC_REG_CMD);
ret = sunxi_nfc_wait_events(nfc, wait, false, 0);
if (ret && !nfc->caps->has_mdma)
dmaengine_terminate_all(nfc->dmac);
sunxi_nfc_randomizer_disable(nand);
sunxi_nfc_hw_ecc_disable(nand);
sunxi_nfc_dma_op_cleanup(nfc, DMA_FROM_DEVICE, &sg);
if (ret)
return ret;
status = readl(nfc->regs + NFC_REG_ECC_ST);
for (i = 0; i < nchunks; i++) {
int data_off = i * ecc->size;
int oob_off = i * (ecc->bytes + 4);
u8 *data = buf + data_off;
u8 *oob = nand->oob_poi + oob_off;
bool erased;
ret = sunxi_nfc_hw_ecc_correct(nand, randomized ? data : NULL,
oob_required ? oob : NULL,
i, status, &erased);
/* ECC errors are handled in the second loop. */
if (ret < 0)
continue;
if (oob_required && !erased) {
/* TODO: use DMA to retrieve OOB */
nand_change_read_column_op(nand,
mtd->writesize + oob_off,
oob, ecc->bytes + 4, false);
sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, i,
!i, page);
}
if (erased)
raw_mode = 1;
sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret);
}
if (status & NFC_ECC_ERR_MSK) {
for (i = 0; i < nchunks; i++) {
int data_off = i * ecc->size;
int oob_off = i * (ecc->bytes + 4);
u8 *data = buf + data_off;
u8 *oob = nand->oob_poi + oob_off;
if (!(status & NFC_ECC_ERR(i)))
continue;
/*
* Re-read the data with the randomizer disabled to
* identify bitflips in erased pages.
* TODO: use DMA to read page in raw mode
*/
if (randomized)
nand_change_read_column_op(nand, data_off,
data, ecc->size,
false);
/* TODO: use DMA to retrieve OOB */
nand_change_read_column_op(nand,
mtd->writesize + oob_off,
oob, ecc->bytes + 4, false);
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 4,
NULL, 0,
ecc->strength);
if (ret >= 0)
raw_mode = 1;
sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret);
}
}
if (oob_required)
sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi,
NULL, !raw_mode,
page);
return max_bitflips;
}
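/*
 * Program one ECC chunk in PIO mode: write the (optionally randomized)
 * data, load the protected OOB bytes into the USER_DATA register and let
 * the ECC engine append the computed ECC bytes.
 */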
static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand,
const u8 *data, int data_off,
const u8 *oob, int oob_off,
int *cur_off, bool bbm,
int page)
{
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
int ret;
if (data_off != *cur_off)
nand_change_write_column_op(nand, data_off, NULL, 0, false);
sunxi_nfc_randomizer_write_buf(nand, data, ecc->size, false, page);
if (data_off + ecc->size != oob_off)
nand_change_write_column_op(nand, oob_off, NULL, 0, false);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
return ret;
sunxi_nfc_randomizer_enable(nand);
sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page);
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
NFC_ACCESS_DIR | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0);
sunxi_nfc_randomizer_disable(nand);
if (ret)
return ret;
*cur_off = oob_off + ecc->bytes + 4;
return 0;
}
static void sunxi_nfc_hw_ecc_write_extra_oob(struct nand_chip *nand,
u8 *oob, int *cur_off,
int page)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_ecc_ctrl *ecc = &nand->ecc;
int offset = ((ecc->bytes + 4) * ecc->steps);
int len = mtd->oobsize - offset;
if (len <= 0)
return;
if (!cur_off || *cur_off != offset)
nand_change_write_column_op(nand, offset + mtd->writesize,
NULL, 0, false);
sunxi_nfc_randomizer_write_buf(nand, oob + offset, len, false, page);
if (cur_off)
*cur_off = mtd->oobsize + mtd->writesize;
}
static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *nand, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_ecc_ctrl *ecc = &nand->ecc;
unsigned int max_bitflips = 0;
int ret, i, cur_off = 0;
bool raw_mode = false;
sunxi_nfc_select_chip(nand, nand->cur_cs);
nand_read_page_op(nand, page, 0, NULL, 0);
sunxi_nfc_hw_ecc_enable(nand);
for (i = 0; i < ecc->steps; i++) {
int data_off = i * ecc->size;
int oob_off = i * (ecc->bytes + 4);
u8 *data = buf + data_off;
u8 *oob = nand->oob_poi + oob_off;
ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off, oob,
oob_off + mtd->writesize,
&cur_off, &max_bitflips,
!i, oob_required, page);
if (ret < 0)
return ret;
else if (ret)
raw_mode = true;
}
if (oob_required)
sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi, &cur_off,
!raw_mode, page);
sunxi_nfc_hw_ecc_disable(nand);
return max_bitflips;
}
static int sunxi_nfc_hw_ecc_read_page_dma(struct nand_chip *nand, u8 *buf,
int oob_required, int page)
{
int ret;
sunxi_nfc_select_chip(nand, nand->cur_cs);
nand_read_page_op(nand, page, 0, NULL, 0);
ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, oob_required, page,
nand->ecc.steps);
if (ret >= 0)
return ret;
/* Fallback to PIO mode */
return sunxi_nfc_hw_ecc_read_page(nand, buf, oob_required, page);
}
static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *nand,
u32 data_offs, u32 readlen,
u8 *bufpoi, int page)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_ecc_ctrl *ecc = &nand->ecc;
int ret, i, cur_off = 0;
unsigned int max_bitflips = 0;
sunxi_nfc_select_chip(nand, nand->cur_cs);
nand_read_page_op(nand, page, 0, NULL, 0);
sunxi_nfc_hw_ecc_enable(nand);
for (i = data_offs / ecc->size;
i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
int data_off = i * ecc->size;
int oob_off = i * (ecc->bytes + 4);
u8 *data = bufpoi + data_off;
u8 *oob = nand->oob_poi + oob_off;
ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off,
oob,
oob_off + mtd->writesize,
&cur_off, &max_bitflips, !i,
false, page);
if (ret < 0)
return ret;
}
sunxi_nfc_hw_ecc_disable(nand);
return max_bitflips;
}
static int sunxi_nfc_hw_ecc_read_subpage_dma(struct nand_chip *nand,
u32 data_offs, u32 readlen,
u8 *buf, int page)
{
int nchunks = DIV_ROUND_UP(data_offs + readlen, nand->ecc.size);
int ret;
sunxi_nfc_select_chip(nand, nand->cur_cs);
nand_read_page_op(nand, page, 0, NULL, 0);
ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, false, page, nchunks);
if (ret >= 0)
return ret;
/* Fallback to PIO mode */
return sunxi_nfc_hw_ecc_read_subpage(nand, data_offs, readlen,
buf, page);
}
static int sunxi_nfc_hw_ecc_write_page(struct nand_chip *nand,
const uint8_t *buf, int oob_required,
int page)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_ecc_ctrl *ecc = &nand->ecc;
int ret, i, cur_off = 0;
sunxi_nfc_select_chip(nand, nand->cur_cs);
nand_prog_page_begin_op(nand, page, 0, NULL, 0);
sunxi_nfc_hw_ecc_enable(nand);
for (i = 0; i < ecc->steps; i++) {
int data_off = i * ecc->size;
int oob_off = i * (ecc->bytes + 4);
const u8 *data = buf + data_off;
const u8 *oob = nand->oob_poi + oob_off;
ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob,
oob_off + mtd->writesize,
&cur_off, !i, page);
if (ret)
return ret;
}
if (oob_required || (nand->options & NAND_NEED_SCRAMBLING))
sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi,
&cur_off, page);
sunxi_nfc_hw_ecc_disable(nand);
return nand_prog_page_end_op(nand);
}
static int sunxi_nfc_hw_ecc_write_subpage(struct nand_chip *nand,
u32 data_offs, u32 data_len,
const u8 *buf, int oob_required,
int page)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_ecc_ctrl *ecc = &nand->ecc;
int ret, i, cur_off = 0;
sunxi_nfc_select_chip(nand, nand->cur_cs);
nand_prog_page_begin_op(nand, page, 0, NULL, 0);
sunxi_nfc_hw_ecc_enable(nand);
for (i = data_offs / ecc->size;
i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
int data_off = i * ecc->size;
int oob_off = i * (ecc->bytes + 4);
const u8 *data = buf + data_off;
const u8 *oob = nand->oob_poi + oob_off;
ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob,
oob_off + mtd->writesize,
&cur_off, !i, page);
if (ret)
return ret;
}
sunxi_nfc_hw_ecc_disable(nand);
return nand_prog_page_end_op(nand);
}
static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *nand,
const u8 *buf,
int oob_required,
int page)
{
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
struct scatterlist sg;
u32 wait;
int ret, i;
sunxi_nfc_select_chip(nand, nand->cur_cs);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
return ret;
ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, ecc->steps,
DMA_TO_DEVICE, &sg);
if (ret)
goto pio_fallback;
for (i = 0; i < ecc->steps; i++) {
const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));
sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, i, !i, page);
}
nand_prog_page_begin_op(nand, page, 0, NULL, 0);
sunxi_nfc_hw_ecc_enable(nand);
sunxi_nfc_randomizer_config(nand, page, false);
sunxi_nfc_randomizer_enable(nand);
writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
nfc->regs + NFC_REG_WCMD_SET);
wait = NFC_CMD_INT_FLAG;
if (nfc->caps->has_mdma)
wait |= NFC_DMA_INT_FLAG;
else
dma_async_issue_pending(nfc->dmac);
writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD |
NFC_DATA_TRANS | NFC_ACCESS_DIR,
nfc->regs + NFC_REG_CMD);
ret = sunxi_nfc_wait_events(nfc, wait, false, 0);
if (ret && !nfc->caps->has_mdma)
dmaengine_terminate_all(nfc->dmac);
sunxi_nfc_randomizer_disable(nand);
sunxi_nfc_hw_ecc_disable(nand);
sunxi_nfc_dma_op_cleanup(nfc, DMA_TO_DEVICE, &sg);
if (ret)
return ret;
if (oob_required || (nand->options & NAND_NEED_SCRAMBLING))
/* TODO: use DMA to transfer extra OOB bytes ? */
sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi,
NULL, page);
return nand_prog_page_end_op(nand);
pio_fallback:
return sunxi_nfc_hw_ecc_write_page(nand, buf, oob_required, page);
}
static int sunxi_nfc_hw_ecc_read_oob(struct nand_chip *nand, int page)
{
u8 *buf = nand_get_data_buf(nand);
return nand->ecc.read_page(nand, buf, 1, page);
}
static int sunxi_nfc_hw_ecc_write_oob(struct nand_chip *nand, int page)
{
struct mtd_info *mtd = nand_to_mtd(nand);
u8 *buf = nand_get_data_buf(nand);
int ret;
memset(buf, 0xff, mtd->writesize);
ret = nand->ecc.write_page(nand, buf, 1, page);
if (ret)
return ret;
/* Send command to program the OOB data */
return nand_prog_page_end_op(nand);
}
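/*
 * Lookup tables translating a clock cycle count into the 2-bit encodings
 * used for the tWB and tRHW fields of NFC_TIMING_CFG.
 */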
static const s32 tWB_lut[] = {6, 12, 16, 20};
static const s32 tRHW_lut[] = {4, 8, 12, 20};
static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
u32 clk_period)
{
u32 clk_cycles = DIV_ROUND_UP(duration, clk_period);
int i;
for (i = 0; i < lut_size; i++) {
if (clk_cycles <= lut[i])
return i;
}
/* Doesn't fit */
return -EINVAL;
}
#define sunxi_nand_lookup_timing(l, p, c) \
_sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
static int sunxi_nfc_setup_interface(struct nand_chip *nand, int csline,
const struct nand_interface_config *conf)
{
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
const struct nand_sdr_timings *timings;
u32 min_clk_period = 0;
s32 tWB, tADL, tWHR, tRHW, tCAD;
long real_clk_rate;
timings = nand_get_sdr_timings(conf);
if (IS_ERR(timings))
return -ENOTSUPP;
/* T1 <=> tCLS */
if (timings->tCLS_min > min_clk_period)
min_clk_period = timings->tCLS_min;
/* T2 <=> tCLH */
if (timings->tCLH_min > min_clk_period)
min_clk_period = timings->tCLH_min;
/* T3 <=> tCS */
if (timings->tCS_min > min_clk_period)
min_clk_period = timings->tCS_min;
/* T4 <=> tCH */
if (timings->tCH_min > min_clk_period)
min_clk_period = timings->tCH_min;
/* T5 <=> tWP */
if (timings->tWP_min > min_clk_period)
min_clk_period = timings->tWP_min;
/* T6 <=> tWH */
if (timings->tWH_min > min_clk_period)
min_clk_period = timings->tWH_min;
/* T7 <=> tALS */
if (timings->tALS_min > min_clk_period)
min_clk_period = timings->tALS_min;
/* T8 <=> tDS */
if (timings->tDS_min > min_clk_period)
min_clk_period = timings->tDS_min;
/* T9 <=> tDH */
if (timings->tDH_min > min_clk_period)
min_clk_period = timings->tDH_min;
/* T10 <=> tRR */
if (timings->tRR_min > (min_clk_period * 3))
min_clk_period = DIV_ROUND_UP(timings->tRR_min, 3);
/* T11 <=> tALH */
if (timings->tALH_min > min_clk_period)
min_clk_period = timings->tALH_min;
/* T12 <=> tRP */
if (timings->tRP_min > min_clk_period)
min_clk_period = timings->tRP_min;
/* T13 <=> tREH */
if (timings->tREH_min > min_clk_period)
min_clk_period = timings->tREH_min;
/* T14 <=> tRC */
if (timings->tRC_min > (min_clk_period * 2))
min_clk_period = DIV_ROUND_UP(timings->tRC_min, 2);
/* T15 <=> tWC */
if (timings->tWC_min > (min_clk_period * 2))
min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);
/* T16 - T19 + tCAD */
if (timings->tWB_max > (min_clk_period * 20))
min_clk_period = DIV_ROUND_UP(timings->tWB_max, 20);
if (timings->tADL_min > (min_clk_period * 32))
min_clk_period = DIV_ROUND_UP(timings->tADL_min, 32);
if (timings->tWHR_min > (min_clk_period * 32))
min_clk_period = DIV_ROUND_UP(timings->tWHR_min, 32);
if (timings->tRHW_min > (min_clk_period * 20))
min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20);
/*
* In non-EDO, tREA should be less than tRP to guarantee that the
* controller does not sample the IO lines too early. Unfortunately,
* the sunxi NAND controller does not allow us to have different
* values for tRP and tREH (tRP = tREH = tRW / 2).
*
* We have 2 options to overcome this limitation:
*
* 1/ Extend tRC to fulfil the tREA <= tRC / 2 constraint
* 2/ Use EDO mode (only works if timings->tRLOH > 0)
*/
if (timings->tREA_max > min_clk_period && !timings->tRLOH_min)
min_clk_period = timings->tREA_max;
tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
min_clk_period);
if (tWB < 0) {
dev_err(nfc->dev, "unsupported tWB\n");
return tWB;
}
tADL = DIV_ROUND_UP(timings->tADL_min, min_clk_period) >> 3;
if (tADL > 3) {
dev_err(nfc->dev, "unsupported tADL\n");
return -EINVAL;
}
tWHR = DIV_ROUND_UP(timings->tWHR_min, min_clk_period) >> 3;
if (tWHR > 3) {
dev_err(nfc->dev, "unsupported tWHR\n");
return -EINVAL;
}
tRHW = sunxi_nand_lookup_timing(tRHW_lut, timings->tRHW_min,
min_clk_period);
if (tRHW < 0) {
dev_err(nfc->dev, "unsupported tRHW\n");
return tRHW;
}
if (csline == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
/*
* TODO: according to ONFI specs this value only applies for DDR NAND,
* but Allwinner seems to set this to 0x7. Mimic them for now.
*/
tCAD = 0x7;
/* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
sunxi_nand->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
/* Convert min_clk_period from picoseconds to nanoseconds */
min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
/*
* Unlike what is stated in Allwinner datasheet, the clk_rate should
* be set to (1 / min_clk_period), and not (2 / min_clk_period).
* This new formula was verified with a scope and validated by
* Allwinner engineers.
*/
sunxi_nand->clk_rate = NSEC_PER_SEC / min_clk_period;
real_clk_rate = clk_round_rate(nfc->mod_clk, sunxi_nand->clk_rate);
if (real_clk_rate <= 0) {
dev_err(nfc->dev, "Unable to round clk %lu\n",
sunxi_nand->clk_rate);
return -EINVAL;
}
sunxi_nand->timing_ctl = 0;
/*
* ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
* output cycle timings shall be used if the host drives tRC less than
* 30 ns. We should also use EDO mode if tREA is bigger than tRP.
*/
min_clk_period = NSEC_PER_SEC / real_clk_rate;
if (min_clk_period * 2 < 30 || min_clk_period * 1000 < timings->tREA_max)
sunxi_nand->timing_ctl = NFC_TIMING_CTL_EDO;
return 0;
}
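/*
 * Worked example (illustrative numbers): if the constraints above end up
 * with min_clk_period = 25000 ps, it is first rounded to 25 ns, giving a
 * requested clk_rate of 1000000000 / 25 = 40 MHz. Assuming the clock can
 * be rounded to exactly 40 MHz, the half read cycle stays at 25 ns, so
 * tRC = 50 ns and the "tRC < 30 ns" criterion alone does not select EDO.
 * However, for an EDO-capable chip (tRLOH_min > 0) with tREA_max = 30 ns,
 * tREA exceeds the 25 ns tRP and NFC_TIMING_CTL_EDO is set.
 */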
static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &nand->ecc;
if (section >= ecc->steps)
return -ERANGE;
oobregion->offset = section * (ecc->bytes + 4) + 4;
oobregion->length = ecc->bytes;
return 0;
}
static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &nand->ecc;
if (section > ecc->steps)
return -ERANGE;
/*
* The first 2 bytes are used for BB markers, hence we
* only have 2 bytes available in the first user data
* section.
*/
if (!section && ecc->engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
oobregion->offset = 2;
oobregion->length = 2;
return 0;
}
/*
* The controller does not provide access to OOB bytes
* past the end of the ECC data.
*/
if (section == ecc->steps && ecc->engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
return -ERANGE;
oobregion->offset = section * (ecc->bytes + 4);
if (section < ecc->steps)
oobregion->length = 4;
else
oobregion->length = mtd->oobsize - oobregion->offset;
return 0;
}
static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = {
.ecc = sunxi_nand_ooblayout_ecc,
.free = sunxi_nand_ooblayout_free,
};
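/*
 * Worked layout example (illustrative geometry): a 2 KiB page with a
 * 64-byte OOB, ecc->size = 1024 and ecc->strength = 16 gives
 * ecc->steps = 2 and ecc->bytes = 28. The resulting OOB map for the
 * on-host ECC case is:
 *
 *   bytes  0 -  1: bad block marker
 *   bytes  2 -  3: free (only 2 user bytes left in the first section)
 *   bytes  4 - 31: ECC for step 0
 *   bytes 32 - 35: free
 *   bytes 36 - 63: ECC for step 1
 */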
static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
struct nand_ecc_ctrl *ecc,
struct device_node *np)
{
static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct mtd_info *mtd = nand_to_mtd(nand);
struct nand_device *nanddev = mtd_to_nanddev(mtd);
int nsectors;
int i;
if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) {
int bytes;
ecc->size = 1024;
nsectors = mtd->writesize / ecc->size;
/* Reserve 2 bytes for the BBM */
bytes = (mtd->oobsize - 2) / nsectors;
/* 4 non-ECC bytes are added before each ECC bytes section */
bytes -= 4;
/* and the number of bytes has to be even. */
if (bytes % 2)
bytes--;
ecc->strength = bytes * 8 / fls(8 * ecc->size);
for (i = 0; i < ARRAY_SIZE(strengths); i++) {
if (strengths[i] > ecc->strength)
break;
}
if (!i)
ecc->strength = 0;
else
ecc->strength = strengths[i - 1];
}
if (ecc->size != 512 && ecc->size != 1024)
return -EINVAL;
/* Prefer 1k ECC chunks over 512 byte ones */
if (ecc->size == 512 && mtd->writesize > 512) {
ecc->size = 1024;
ecc->strength *= 2;
}
/* Add ECC info retrieval from DT */
for (i = 0; i < ARRAY_SIZE(strengths); i++) {
if (ecc->strength <= strengths[i]) {
/*
* Update ecc->strength value with the actual strength
* that will be used by the ECC engine.
*/
ecc->strength = strengths[i];
break;
}
}
if (i >= ARRAY_SIZE(strengths)) {
dev_err(nfc->dev, "unsupported strength\n");
return -ENOTSUPP;
}
/* HW ECC always requests ECC bytes for 1024 byte blocks */
ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8);
/* HW ECC always works with an even number of ECC bytes */
ecc->bytes = ALIGN(ecc->bytes, 2);
nsectors = mtd->writesize / ecc->size;
if (mtd->oobsize < ((ecc->bytes + 4) * nsectors))
return -EINVAL;
ecc->read_oob = sunxi_nfc_hw_ecc_read_oob;
ecc->write_oob = sunxi_nfc_hw_ecc_write_oob;
mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops);
if (nfc->dmac || nfc->caps->has_mdma) {
ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
nand->options |= NAND_USES_DMA;
} else {
ecc->read_page = sunxi_nfc_hw_ecc_read_page;
ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
ecc->write_page = sunxi_nfc_hw_ecc_write_page;
}
/* TODO: support DMA for raw accesses and subpage write */
ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage;
ecc->read_oob_raw = nand_read_oob_std;
ecc->write_oob_raw = nand_write_oob_std;
sunxi_nand->ecc.ecc_ctl = NFC_ECC_MODE(i) | NFC_ECC_EXCEPTION |
NFC_ECC_PIPELINE | NFC_ECC_EN;
if (ecc->size == 512)
sunxi_nand->ecc.ecc_ctl |= NFC_ECC_BLOCK_512;
return 0;
}
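/*
 * Worked example of the "maximize strength" path above (illustrative
 * geometry): an 8 KiB page with a 640-byte OOB gives nsectors = 8 and
 * (640 - 2) / 8 - 4 = 75 bytes per step, rounded down to 74 to stay even.
 * 74 * 8 / fls(8 * 1024) = 592 / 14 = 42, so the largest supported
 * strength not exceeding 42 is 40. The generic path then yields
 * ecc->bytes = DIV_ROUND_UP(40 * 14, 8) = 70, and (70 + 4) * 8 = 592
 * bytes fit in the 640-byte OOB.
 */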
static int sunxi_nand_attach_chip(struct nand_chip *nand)
{
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(&nand->base);
struct nand_ecc_ctrl *ecc = &nand->ecc;
struct device_node *np = nand_get_flash_node(nand);
int ret;
if (nand->bbt_options & NAND_BBT_USE_FLASH)
nand->bbt_options |= NAND_BBT_NO_OOB;
if (nand->options & NAND_NEED_SCRAMBLING)
nand->options |= NAND_NO_SUBPAGE_WRITE;
nand->options |= NAND_SUBPAGE_READ;
if (!ecc->size) {
ecc->size = requirements->step_size;
ecc->strength = requirements->strength;
}
if (!ecc->size || !ecc->strength)
return -EINVAL;
switch (ecc->engine_type) {
case NAND_ECC_ENGINE_TYPE_ON_HOST:
ret = sunxi_nand_hw_ecc_ctrl_init(nand, ecc, np);
if (ret)
return ret;
break;
case NAND_ECC_ENGINE_TYPE_NONE:
case NAND_ECC_ENGINE_TYPE_SOFT:
break;
default:
return -EINVAL;
}
return 0;
}
static int sunxi_nfc_exec_subop(struct nand_chip *nand,
const struct nand_subop *subop)
{
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
u32 cmd = 0, extcmd = 0, cnt = 0, addrs[2] = { };
unsigned int i, j, remaining, start;
void *inbuf = NULL;
int ret;
for (i = 0; i < subop->ninstrs; i++) {
const struct nand_op_instr *instr = &subop->instrs[i];
switch (instr->type) {
case NAND_OP_CMD_INSTR:
if (cmd & NFC_SEND_CMD1) {
if (WARN_ON(cmd & NFC_SEND_CMD2))
return -EINVAL;
cmd |= NFC_SEND_CMD2;
extcmd |= instr->ctx.cmd.opcode;
} else {
cmd |= NFC_SEND_CMD1 |
NFC_CMD(instr->ctx.cmd.opcode);
}
break;
case NAND_OP_ADDR_INSTR:
remaining = nand_subop_get_num_addr_cyc(subop, i);
start = nand_subop_get_addr_start_off(subop, i);
for (j = 0; j < 8 && j + start < remaining; j++) {
u32 addr = instr->ctx.addr.addrs[j + start];
addrs[j / 4] |= addr << (j % 4) * 8;
}
if (j)
cmd |= NFC_SEND_ADR | NFC_ADR_NUM(j);
break;
case NAND_OP_DATA_IN_INSTR:
case NAND_OP_DATA_OUT_INSTR:
start = nand_subop_get_data_start_off(subop, i);
remaining = nand_subop_get_data_len(subop, i);
cnt = min_t(u32, remaining, NFC_SRAM_SIZE);
cmd |= NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
if (instr->type == NAND_OP_DATA_OUT_INSTR) {
cmd |= NFC_ACCESS_DIR;
memcpy_toio(nfc->regs + NFC_RAM0_BASE,
instr->ctx.data.buf.out + start,
cnt);
} else {
inbuf = instr->ctx.data.buf.in + start;
}
break;
case NAND_OP_WAITRDY_INSTR:
cmd |= NFC_WAIT_FLAG;
break;
}
}
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
return ret;
if (cmd & NFC_SEND_ADR) {
writel(addrs[0], nfc->regs + NFC_REG_ADDR_LOW);
writel(addrs[1], nfc->regs + NFC_REG_ADDR_HIGH);
}
if (cmd & NFC_SEND_CMD2)
writel(extcmd,
nfc->regs +
(cmd & NFC_ACCESS_DIR ?
NFC_REG_WCMD_SET : NFC_REG_RCMD_SET));
if (cmd & NFC_DATA_TRANS)
writel(cnt, nfc->regs + NFC_REG_CNT);
writel(cmd, nfc->regs + NFC_REG_CMD);
ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG,
!(cmd & NFC_WAIT_FLAG) && cnt < 64,
0);
if (ret)
return ret;
if (inbuf)
memcpy_fromio(inbuf, nfc->regs + NFC_RAM0_BASE, cnt);
return 0;
}
static int sunxi_nfc_soft_waitrdy(struct nand_chip *nand,
const struct nand_subop *subop)
{
return nand_soft_waitrdy(nand,
subop->instrs[0].ctx.waitrdy.timeout_ms);
}
static const struct nand_op_parser sunxi_nfc_op_parser = NAND_OP_PARSER(
NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)),
NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024),
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
);
static const struct nand_op_parser sunxi_nfc_norb_op_parser = NAND_OP_PARSER(
NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)),
NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop,
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024),
NAND_OP_PARSER_PAT_CMD_ELEM(true)),
NAND_OP_PARSER_PATTERN(sunxi_nfc_soft_waitrdy,
NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
);
static int sunxi_nfc_exec_op(struct nand_chip *nand,
const struct nand_operation *op, bool check_only)
{
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
const struct nand_op_parser *parser;
if (!check_only)
sunxi_nfc_select_chip(nand, op->cs);
if (sunxi_nand->sels[op->cs].rb >= 0)
parser = &sunxi_nfc_op_parser;
else
parser = &sunxi_nfc_norb_op_parser;
return nand_op_parser_exec_op(nand, parser, op, check_only);
}
static const struct nand_controller_ops sunxi_nand_controller_ops = {
.attach_chip = sunxi_nand_attach_chip,
.setup_interface = sunxi_nfc_setup_interface,
.exec_op = sunxi_nfc_exec_op,
};
static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
{
struct sunxi_nand_chip *sunxi_nand;
struct nand_chip *chip;
int ret;
while (!list_empty(&nfc->chips)) {
sunxi_nand = list_first_entry(&nfc->chips,
struct sunxi_nand_chip,
node);
chip = &sunxi_nand->nand;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
list_del(&sunxi_nand->node);
}
}
static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
struct device_node *np)
{
struct sunxi_nand_chip *sunxi_nand;
struct mtd_info *mtd;
struct nand_chip *nand;
int nsels;
int ret;
int i;
u32 tmp;
if (!of_get_property(np, "reg", &nsels))
return -EINVAL;
nsels /= sizeof(u32);
if (!nsels) {
dev_err(dev, "invalid reg property size\n");
return -EINVAL;
}
sunxi_nand = devm_kzalloc(dev, struct_size(sunxi_nand, sels, nsels),
GFP_KERNEL);
if (!sunxi_nand)
return -ENOMEM;
sunxi_nand->nsels = nsels;
for (i = 0; i < nsels; i++) {
ret = of_property_read_u32_index(np, "reg", i, &tmp);
if (ret) {
dev_err(dev, "could not retrieve reg property: %d\n",
ret);
return ret;
}
if (tmp > NFC_MAX_CS) {
dev_err(dev,
"invalid reg value: %u (max CS = 7)\n",
tmp);
return -EINVAL;
}
if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
dev_err(dev, "CS %d already assigned\n", tmp);
return -EINVAL;
}
sunxi_nand->sels[i].cs = tmp;
if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
tmp < 2)
sunxi_nand->sels[i].rb = tmp;
else
sunxi_nand->sels[i].rb = -1;
}
nand = &sunxi_nand->nand;
/* Default tR value specified in the ONFI spec (chapter 4.15.1) */
nand->controller = &nfc->controller;
nand->controller->ops = &sunxi_nand_controller_ops;
/*
* Set the ECC mode to the default value in case nothing is specified
* in the DT.
*/
nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
nand_set_flash_node(nand, np);
mtd = nand_to_mtd(nand);
mtd->dev.parent = dev;
ret = nand_scan(nand, nsels);
if (ret)
return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "failed to register mtd device: %d\n", ret);
nand_cleanup(nand);
return ret;
}
list_add_tail(&sunxi_nand->node, &nfc->chips);
return 0;
}
static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
{
struct device_node *np = dev->of_node;
struct device_node *nand_np;
int ret;
for_each_child_of_node(np, nand_np) {
ret = sunxi_nand_chip_init(dev, nfc, nand_np);
if (ret) {
of_node_put(nand_np);
sunxi_nand_chips_cleanup(nfc);
return ret;
}
}
return 0;
}
static int sunxi_nfc_dma_init(struct sunxi_nfc *nfc, struct resource *r)
{
int ret;
if (nfc->caps->has_mdma)
return 0;
nfc->dmac = dma_request_chan(nfc->dev, "rxtx");
if (IS_ERR(nfc->dmac)) {
ret = PTR_ERR(nfc->dmac);
if (ret == -EPROBE_DEFER)
return ret;
/* Ignore errors to fall back to PIO mode */
dev_warn(nfc->dev, "failed to request rxtx DMA channel: %d\n", ret);
nfc->dmac = NULL;
} else {
struct dma_slave_config dmac_cfg = { };
dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data;
dmac_cfg.dst_addr = dmac_cfg.src_addr;
dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
dmac_cfg.src_maxburst = nfc->caps->dma_maxburst;
dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst;
dmaengine_slave_config(nfc->dmac, &dmac_cfg);
}
return 0;
}
static int sunxi_nfc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *r;
struct sunxi_nfc *nfc;
int irq;
int ret;
nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
if (!nfc)
return -ENOMEM;
nfc->dev = dev;
nand_controller_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
nfc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(nfc->regs))
return PTR_ERR(nfc->regs);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
nfc->ahb_clk = devm_clk_get_enabled(dev, "ahb");
if (IS_ERR(nfc->ahb_clk)) {
dev_err(dev, "failed to retrieve ahb clk\n");
return PTR_ERR(nfc->ahb_clk);
}
nfc->mod_clk = devm_clk_get_enabled(dev, "mod");
if (IS_ERR(nfc->mod_clk)) {
dev_err(dev, "failed to retrieve mod clk\n");
return PTR_ERR(nfc->mod_clk);
}
nfc->reset = devm_reset_control_get_optional_exclusive(dev, "ahb");
if (IS_ERR(nfc->reset))
return PTR_ERR(nfc->reset);
ret = reset_control_deassert(nfc->reset);
if (ret) {
dev_err(dev, "reset err %d\n", ret);
return ret;
}
nfc->caps = of_device_get_match_data(&pdev->dev);
if (!nfc->caps) {
ret = -EINVAL;
goto out_ahb_reset_reassert;
}
ret = sunxi_nfc_rst(nfc);
if (ret)
goto out_ahb_reset_reassert;
writel(0, nfc->regs + NFC_REG_INT);
ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt,
0, "sunxi-nand", nfc);
if (ret)
goto out_ahb_reset_reassert;
ret = sunxi_nfc_dma_init(nfc, r);
if (ret)
goto out_ahb_reset_reassert;
platform_set_drvdata(pdev, nfc);
ret = sunxi_nand_chips_init(dev, nfc);
if (ret) {
dev_err(dev, "failed to init nand chips\n");
goto out_release_dmac;
}
return 0;
out_release_dmac:
if (nfc->dmac)
dma_release_channel(nfc->dmac);
out_ahb_reset_reassert:
reset_control_assert(nfc->reset);
return ret;
}
static void sunxi_nfc_remove(struct platform_device *pdev)
{
struct sunxi_nfc *nfc = platform_get_drvdata(pdev);
sunxi_nand_chips_cleanup(nfc);
reset_control_assert(nfc->reset);
if (nfc->dmac)
dma_release_channel(nfc->dmac);
}
static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = {
.reg_io_data = NFC_REG_A10_IO_DATA,
.dma_maxburst = 4,
};
static const struct sunxi_nfc_caps sunxi_nfc_a23_caps = {
.has_mdma = true,
.reg_io_data = NFC_REG_A23_IO_DATA,
.dma_maxburst = 8,
};
static const struct of_device_id sunxi_nfc_ids[] = {
{
.compatible = "allwinner,sun4i-a10-nand",
.data = &sunxi_nfc_a10_caps,
},
{
.compatible = "allwinner,sun8i-a23-nand-controller",
.data = &sunxi_nfc_a23_caps,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_nfc_ids);
static struct platform_driver sunxi_nfc_driver = {
.driver = {
.name = "sunxi_nand",
.of_match_table = sunxi_nfc_ids,
},
.probe = sunxi_nfc_probe,
.remove_new = sunxi_nfc_remove,
};
module_platform_driver(sunxi_nfc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Boris BREZILLON");
MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
MODULE_ALIAS("platform:sunxi_nand");
| linux-master | drivers/mtd/nand/raw/sunxi_nand.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Toradex AG
*
* Author: Marcel Ziswiler <[email protected]>
*/
#include <linux/mtd/rawnand.h>
#include "internals.h"
static void esmt_nand_decode_id(struct nand_chip *chip)
{
struct nand_device *base = &chip->base;
struct nand_ecc_props requirements = {};
nand_decode_ext_id(chip);
/* Extract ECC requirements from 5th id byte. */
if (chip->id.len >= 5 && nand_is_slc(chip)) {
requirements.step_size = 512;
switch (chip->id.data[4] & 0x3) {
case 0x0:
requirements.strength = 4;
break;
case 0x1:
requirements.strength = 2;
break;
case 0x2:
requirements.strength = 1;
break;
default:
WARN(1, "Could not get ECC info");
requirements.step_size = 0;
break;
}
}
nanddev_set_ecc_requirements(base, &requirements);
}
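/*
 * For instance, a hypothetical 5th ID byte of 0x42 has its low two bits
 * equal to 0x2, which decodes to 1 bit of correction per 512 bytes;
 * 0x41 would decode to 2 bits and 0x40 to 4 bits per 512 bytes.
 */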
static int esmt_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
/*
* It is known that some ESMT SLC NANDs have been shipped
* with the factory bad block markers in the first or last page
* of the block, instead of the first or second page. To be on
* the safe side, let's check all three locations.
*/
chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE |
NAND_BBM_LASTPAGE;
return 0;
}
const struct nand_manufacturer_ops esmt_nand_manuf_ops = {
.detect = esmt_nand_decode_id,
.init = esmt_nand_init,
};
| linux-master | drivers/mtd/nand/raw/nand_esmt.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* MTK NAND Flash controller driver.
* Copyright (C) 2016 MediaTek Inc.
* Authors: Xiaolei Li <[email protected]>
* Jorge Ramirez-Ortiz <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/mtd/nand-ecc-mtk.h>
/* NAND controller register definition */
#define NFI_CNFG (0x00)
#define CNFG_AHB BIT(0)
#define CNFG_READ_EN BIT(1)
#define CNFG_DMA_BURST_EN BIT(2)
#define CNFG_BYTE_RW BIT(6)
#define CNFG_HW_ECC_EN BIT(8)
#define CNFG_AUTO_FMT_EN BIT(9)
#define CNFG_OP_CUST (6 << 12)
#define NFI_PAGEFMT (0x04)
#define PAGEFMT_FDM_ECC_SHIFT (12)
#define PAGEFMT_FDM_SHIFT (8)
#define PAGEFMT_SEC_SEL_512 BIT(2)
#define PAGEFMT_512_2K (0)
#define PAGEFMT_2K_4K (1)
#define PAGEFMT_4K_8K (2)
#define PAGEFMT_8K_16K (3)
/* NFI control */
#define NFI_CON (0x08)
#define CON_FIFO_FLUSH BIT(0)
#define CON_NFI_RST BIT(1)
#define CON_BRD BIT(8) /* burst read */
#define CON_BWR BIT(9) /* burst write */
#define CON_SEC_SHIFT (12)
/* Timing control register */
#define NFI_ACCCON (0x0C)
#define NFI_INTR_EN (0x10)
#define INTR_AHB_DONE_EN BIT(6)
#define NFI_INTR_STA (0x14)
#define NFI_CMD (0x20)
#define NFI_ADDRNOB (0x30)
#define NFI_COLADDR (0x34)
#define NFI_ROWADDR (0x38)
#define NFI_STRDATA (0x40)
#define STAR_EN (1)
#define STAR_DE (0)
#define NFI_CNRNB (0x44)
#define NFI_DATAW (0x50)
#define NFI_DATAR (0x54)
#define NFI_PIO_DIRDY (0x58)
#define PIO_DI_RDY (0x01)
#define NFI_STA (0x60)
#define STA_CMD BIT(0)
#define STA_ADDR BIT(1)
#define STA_BUSY BIT(8)
#define STA_EMP_PAGE BIT(12)
#define NFI_FSM_CUSTDATA (0xe << 16)
#define NFI_FSM_MASK (0xf << 16)
#define NFI_ADDRCNTR (0x70)
#define CNTR_MASK GENMASK(16, 12)
#define ADDRCNTR_SEC_SHIFT (12)
#define ADDRCNTR_SEC(val) \
(((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
#define NFI_STRADDR (0x80)
#define NFI_BYTELEN (0x84)
#define NFI_CSEL (0x90)
#define NFI_FDML(x) (0xA0 + (x) * sizeof(u32) * 2)
#define NFI_FDMM(x) (0xA4 + (x) * sizeof(u32) * 2)
#define NFI_FDM_MAX_SIZE (8)
#define NFI_FDM_MIN_SIZE (1)
#define NFI_DEBUG_CON1 (0x220)
#define STROBE_MASK GENMASK(4, 3)
#define STROBE_SHIFT (3)
#define MAX_STROBE_DLY (3)
#define NFI_MASTER_STA (0x224)
#define MASTER_STA_MASK (0x0FFF)
#define NFI_EMPTY_THRESH (0x23C)
#define MTK_NAME "mtk-nand"
#define KB(x) ((x) * 1024UL)
#define MB(x) (KB(x) * 1024UL)
#define MTK_TIMEOUT (500000)
#define MTK_RESET_TIMEOUT (1000000)
#define MTK_NAND_MAX_NSELS (2)
#define MTK_NFC_MIN_SPARE (16)
#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
((tpoecs) << 28 | (tprecs) << 22 | (tc2r) << 16 | \
(tw2r) << 12 | (twh) << 8 | (twst) << 4 | (trlt))
struct mtk_nfc_caps {
const u8 *spare_size;
u8 num_spare_size;
u8 pageformat_spare_shift;
u8 nfi_clk_div;
u8 max_sector;
u32 max_sector_size;
};
struct mtk_nfc_bad_mark_ctl {
void (*bm_swap)(struct mtd_info *, u8 *buf, int raw);
u32 sec;
u32 pos;
};
/*
* FDM: region used to store free OOB data
*/
struct mtk_nfc_fdm {
u32 reg_size;
u32 ecc_size;
};
struct mtk_nfc_nand_chip {
struct list_head node;
struct nand_chip nand;
struct mtk_nfc_bad_mark_ctl bad_mark;
struct mtk_nfc_fdm fdm;
u32 spare_per_sector;
int nsels;
u8 sels[];
/* nothing after this field */
};
struct mtk_nfc_clk {
struct clk *nfi_clk;
struct clk *pad_clk;
};
struct mtk_nfc {
struct nand_controller controller;
struct mtk_ecc_config ecc_cfg;
struct mtk_nfc_clk clk;
struct mtk_ecc *ecc;
struct device *dev;
const struct mtk_nfc_caps *caps;
void __iomem *regs;
struct completion done;
struct list_head chips;
u8 *buffer;
unsigned long assigned_cs;
};
/*
 * Supported spare sizes for each IP.
 * The order should match the spare size bitfield definition of the
 * NFI_PAGEFMT register.
*/
static const u8 spare_size_mt2701[] = {
16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 63, 64
};
static const u8 spare_size_mt2712[] = {
16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64, 67,
74
};
static const u8 spare_size_mt7622[] = {
16, 26, 27, 28
};
static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
{
return container_of(nand, struct mtk_nfc_nand_chip, nand);
}
static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i)
{
return (u8 *)p + i * chip->ecc.size;
}
static inline u8 *oob_ptr(struct nand_chip *chip, int i)
{
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
u8 *poi;
/* Map the sector's FDM data to the free OOB:
 * the beginning of the OOB area stores the FDM data of the bad mark sector
*/
if (i < mtk_nand->bad_mark.sec)
poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
else if (i == mtk_nand->bad_mark.sec)
poi = chip->oob_poi;
else
poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;
return poi;
}
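/*
 * For example (illustrative values), with fdm.reg_size = 8 and
 * bad_mark.sec = 2, the FDM data of sector 2 lands at oob_poi[0..7],
 * sector 0 at oob_poi[8..15], sector 1 at oob_poi[16..23] and sector 3
 * at oob_poi[24..31], so the FDM bytes that carry the bad block marker
 * end up at the very beginning of the OOB buffer.
 */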
static inline int mtk_data_len(struct nand_chip *chip)
{
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
return chip->ecc.size + mtk_nand->spare_per_sector;
}
static inline u8 *mtk_data_ptr(struct nand_chip *chip, int i)
{
struct mtk_nfc *nfc = nand_get_controller_data(chip);
return nfc->buffer + i * mtk_data_len(chip);
}
static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i)
{
struct mtk_nfc *nfc = nand_get_controller_data(chip);
return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size;
}
static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
{
writel(val, nfc->regs + reg);
}
static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
{
writew(val, nfc->regs + reg);
}
static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
{
writeb(val, nfc->regs + reg);
}
static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
{
return readl_relaxed(nfc->regs + reg);
}
static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
{
return readw_relaxed(nfc->regs + reg);
}
static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
{
return readb_relaxed(nfc->regs + reg);
}
static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
{
struct device *dev = nfc->dev;
u32 val;
int ret;
/* reset all registers and force the NFI master to terminate */
nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
/* wait for the master to finish the last transaction */
ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val,
!(val & MASTER_STA_MASK), 50,
MTK_RESET_TIMEOUT);
if (ret)
dev_warn(dev, "master active in reset [0x%x] = 0x%x\n",
NFI_MASTER_STA, val);
/* ensure any status register affected by the NFI master is reset */
nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
nfi_writew(nfc, STAR_DE, NFI_STRDATA);
}
static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
{
struct device *dev = nfc->dev;
u32 val;
int ret;
nfi_writel(nfc, command, NFI_CMD);
ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
!(val & STA_CMD), 10, MTK_TIMEOUT);
if (ret) {
dev_warn(dev, "nfi core timed out entering command mode\n");
return -EIO;
}
return 0;
}
static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
{
struct device *dev = nfc->dev;
u32 val;
int ret;
nfi_writel(nfc, addr, NFI_COLADDR);
nfi_writel(nfc, 0, NFI_ROWADDR);
nfi_writew(nfc, 1, NFI_ADDRNOB);
ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
!(val & STA_ADDR), 10, MTK_TIMEOUT);
if (ret) {
dev_warn(dev, "nfi core timed out entering address mode\n");
return -EIO;
}
return 0;
}
static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
struct mtk_nfc *nfc = nand_get_controller_data(chip);
u32 fmt, spare, i;
if (!mtd->writesize)
return 0;
spare = mtk_nand->spare_per_sector;
switch (mtd->writesize) {
case 512:
fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
break;
case KB(2):
if (chip->ecc.size == 512)
fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
else
fmt = PAGEFMT_512_2K;
break;
case KB(4):
if (chip->ecc.size == 512)
fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
else
fmt = PAGEFMT_2K_4K;
break;
case KB(8):
if (chip->ecc.size == 512)
fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
else
fmt = PAGEFMT_4K_8K;
break;
case KB(16):
fmt = PAGEFMT_8K_16K;
break;
default:
dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize);
return -EINVAL;
}
/*
* the hardware will double the value for this eccsize, so we need to
* halve it
*/
if (chip->ecc.size == 1024)
spare >>= 1;
for (i = 0; i < nfc->caps->num_spare_size; i++) {
if (nfc->caps->spare_size[i] == spare)
break;
}
if (i == nfc->caps->num_spare_size) {
dev_err(nfc->dev, "invalid spare size %d\n", spare);
return -EINVAL;
}
fmt |= i << nfc->caps->pageformat_spare_shift;
fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
nfi_writel(nfc, fmt, NFI_PAGEFMT);
nfc->ecc_cfg.strength = chip->ecc.strength;
nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size;
return 0;
}
static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
{
int rc;
u8 val;
rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val,
val & PIO_DI_RDY, 10, MTK_TIMEOUT);
if (rc < 0)
dev_err(nfc->dev, "data not ready\n");
}
static inline u8 mtk_nfc_read_byte(struct nand_chip *chip)
{
struct mtk_nfc *nfc = nand_get_controller_data(chip);
u32 reg;
/* after each byte read, the NFI_STA reg is reset by the hardware */
reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
if (reg != NFI_FSM_CUSTDATA) {
reg = nfi_readw(nfc, NFI_CNFG);
reg |= CNFG_BYTE_RW | CNFG_READ_EN;
nfi_writew(nfc, reg, NFI_CNFG);
/*
* set to max sector to allow the HW to continue reading over
* unaligned accesses
*/
reg = (nfc->caps->max_sector << CON_SEC_SHIFT) | CON_BRD;
nfi_writel(nfc, reg, NFI_CON);
/* trigger to fetch data */
nfi_writew(nfc, STAR_EN, NFI_STRDATA);
}
mtk_nfc_wait_ioready(nfc);
return nfi_readb(nfc, NFI_DATAR);
}
static void mtk_nfc_read_buf(struct nand_chip *chip, u8 *buf, int len)
{
int i;
for (i = 0; i < len; i++)
buf[i] = mtk_nfc_read_byte(chip);
}
static void mtk_nfc_write_byte(struct nand_chip *chip, u8 byte)
{
struct mtk_nfc *nfc = nand_get_controller_data(chip);
u32 reg;
reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
if (reg != NFI_FSM_CUSTDATA) {
reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
nfi_writew(nfc, reg, NFI_CNFG);
reg = nfc->caps->max_sector << CON_SEC_SHIFT | CON_BWR;
nfi_writel(nfc, reg, NFI_CON);
nfi_writew(nfc, STAR_EN, NFI_STRDATA);
}
mtk_nfc_wait_ioready(nfc);
nfi_writeb(nfc, byte, NFI_DATAW);
}
static void mtk_nfc_write_buf(struct nand_chip *chip, const u8 *buf, int len)
{
int i;
for (i = 0; i < len; i++)
mtk_nfc_write_byte(chip, buf[i]);
}
static int mtk_nfc_exec_instr(struct nand_chip *chip,
const struct nand_op_instr *instr)
{
struct mtk_nfc *nfc = nand_get_controller_data(chip);
unsigned int i;
u32 status;
switch (instr->type) {
case NAND_OP_CMD_INSTR:
mtk_nfc_send_command(nfc, instr->ctx.cmd.opcode);
return 0;
case NAND_OP_ADDR_INSTR:
for (i = 0; i < instr->ctx.addr.naddrs; i++)
mtk_nfc_send_address(nfc, instr->ctx.addr.addrs[i]);
return 0;
case NAND_OP_DATA_IN_INSTR:
mtk_nfc_read_buf(chip, instr->ctx.data.buf.in,
instr->ctx.data.len);
return 0;
case NAND_OP_DATA_OUT_INSTR:
mtk_nfc_write_buf(chip, instr->ctx.data.buf.out,
instr->ctx.data.len);
return 0;
case NAND_OP_WAITRDY_INSTR:
return readl_poll_timeout(nfc->regs + NFI_STA, status,
!(status & STA_BUSY), 20,
instr->ctx.waitrdy.timeout_ms * 1000);
default:
break;
}
return -EINVAL;
}
static void mtk_nfc_select_target(struct nand_chip *nand, unsigned int cs)
{
struct mtk_nfc *nfc = nand_get_controller_data(nand);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);
mtk_nfc_hw_runtime_config(nand_to_mtd(nand));
nfi_writel(nfc, mtk_nand->sels[cs], NFI_CSEL);
}
static int mtk_nfc_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
struct mtk_nfc *nfc = nand_get_controller_data(chip);
unsigned int i;
int ret = 0;
if (check_only)
return 0;
mtk_nfc_hw_reset(nfc);
nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
mtk_nfc_select_target(chip, op->cs);
for (i = 0; i < op->ninstrs; i++) {
ret = mtk_nfc_exec_instr(chip, &op->instrs[i]);
if (ret)
break;
}
return ret;
}
static int mtk_nfc_setup_interface(struct nand_chip *chip, int csline,
const struct nand_interface_config *conf)
{
struct mtk_nfc *nfc = nand_get_controller_data(chip);
const struct nand_sdr_timings *timings;
u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst = 0, trlt = 0;
u32 temp, tsel = 0;
timings = nand_get_sdr_timings(conf);
if (IS_ERR(timings))
return -ENOTSUPP;
if (csline == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
rate = clk_get_rate(nfc->clk.nfi_clk);
/* There is a frequency divider in some IPs */
rate /= nfc->caps->nfi_clk_div;
/* turn clock rate into kHz */
rate /= 1000;
tpoecs = max(timings->tALH_min, timings->tCLH_min) / 1000;
tpoecs = DIV_ROUND_UP(tpoecs * rate, 1000000);
tpoecs &= 0xf;
tprecs = max(timings->tCLS_min, timings->tALS_min) / 1000;
tprecs = DIV_ROUND_UP(tprecs * rate, 1000000);
tprecs &= 0x3f;
/* sdr interface has no tCR which means CE# low to RE# low */
tc2r = 0;
tw2r = timings->tWHR_min / 1000;
tw2r = DIV_ROUND_UP(tw2r * rate, 1000000);
tw2r = DIV_ROUND_UP(tw2r - 1, 2);
tw2r &= 0xf;
twh = max(timings->tREH_min, timings->tWH_min) / 1000;
twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;
twh &= 0xf;
/* Calculate real WE#/RE# hold time in nanosecond */
temp = (twh + 1) * 1000000 / rate;
/* nanosecond to picosecond */
temp *= 1000;
/*
 * The WE# low level time should be expanded to meet both the WE# pulse
 * time and the WE# cycle time.
*/
if (temp < timings->tWC_min)
twst = timings->tWC_min - temp;
twst = max(timings->tWP_min, twst) / 1000;
twst = DIV_ROUND_UP(twst * rate, 1000000) - 1;
twst &= 0xf;
/*
 * The RE# low level time should be expanded to meet both the RE# pulse
 * time and the RE# cycle time.
*/
if (temp < timings->tRC_min)
trlt = timings->tRC_min - temp;
trlt = max(trlt, timings->tRP_min) / 1000;
trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1;
trlt &= 0xf;
/* Calculate RE# pulse time in nanosecond. */
temp = (trlt + 1) * 1000000 / rate;
/* nanosecond to picosecond */
temp *= 1000;
/*
* If RE# access time is bigger than RE# pulse time,
* delay sampling data timing.
*/
if (temp < timings->tREA_max) {
tsel = timings->tREA_max / 1000;
tsel = DIV_ROUND_UP(tsel * rate, 1000000);
tsel -= (trlt + 1);
if (tsel > MAX_STROBE_DLY) {
trlt += tsel - MAX_STROBE_DLY;
tsel = MAX_STROBE_DLY;
}
}
temp = nfi_readl(nfc, NFI_DEBUG_CON1);
temp &= ~STROBE_MASK;
temp |= tsel << STROBE_SHIFT;
nfi_writel(nfc, temp, NFI_DEBUG_CON1);
/*
* ACCON: access timing control register
* -------------------------------------
* 31:28: tpoecs, minimum required time for CS post pulling down after
* accessing the device
* 27:22: tprecs, minimum required time for CS pre pulling down before
* accessing the device
* 21:16: tc2r, minimum required time from NCEB low to NREB low
* 15:12: tw2r, minimum required time from NWEB high to NREB low.
* 11:08: twh, write enable hold time
* 07:04: twst, write wait states
* 03:00: trlt, read wait states
*/
trlt = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt);
nfi_writel(nfc, trlt, NFI_ACCCON);
return 0;
}
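/*
 * Worked example (illustrative clock and timings): with a 125 MHz NFI
 * clock and nfi_clk_div = 1, rate = 125000 kHz, i.e. one cycle is 8 ns.
 * For max(tREH_min, tWH_min) = 10 ns, twh = DIV_ROUND_UP(10 * 125000,
 * 1000000) - 1 = 1, so WE#/RE# are held high for two cycles (16 ns).
 * The seven resulting fields are finally packed into a single NFI_ACCCON
 * value by the ACCTIMING() macro following the layout described above.
 */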
static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
{
struct mtk_nfc *nfc = nand_get_controller_data(chip);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
int size = chip->ecc.size + mtk_nand->fdm.reg_size;
nfc->ecc_cfg.mode = ECC_DMA_MODE;
nfc->ecc_cfg.op = ECC_ENCODE;
return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size);
}
static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c)
{
/* nop */
}
static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
u32 bad_pos = nand->bad_mark.pos;
if (raw)
bad_pos += nand->bad_mark.sec * mtk_data_len(chip);
else
bad_pos += nand->bad_mark.sec * chip->ecc.size;
swap(chip->oob_poi[0], buf[bad_pos]);
}
static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset,
u32 len, const u8 *buf)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
struct mtk_nfc *nfc = nand_get_controller_data(chip);
struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
u32 start, end;
int i, ret;
start = offset / chip->ecc.size;
end = DIV_ROUND_UP(offset + len, chip->ecc.size);
memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
for (i = 0; i < chip->ecc.steps; i++) {
memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
chip->ecc.size);
if (start > i || i >= end)
continue;
if (i == mtk_nand->bad_mark.sec)
mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
/* program the ECC parity data back into the OOB */
ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i));
if (ret < 0)
return ret;
}
return 0;
}
static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
struct mtk_nfc *nfc = nand_get_controller_data(chip);
struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
u32 i;
memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
for (i = 0; i < chip->ecc.steps; i++) {
if (buf)
memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
chip->ecc.size);
if (i == mtk_nand->bad_mark.sec)
mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
}
}
static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start,
u32 sectors)
{
struct mtk_nfc *nfc = nand_get_controller_data(chip);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
u32 vall, valm;
u8 *oobptr;
int i, j;
for (i = 0; i < sectors; i++) {
oobptr = oob_ptr(chip, start + i);
vall = nfi_readl(nfc, NFI_FDML(i));
valm = nfi_readl(nfc, NFI_FDMM(i));
for (j = 0; j < fdm->reg_size; j++)
oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
}
}
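/*
 * Example of the unpacking above (illustrative register contents): with
 * fdm->reg_size = 8, NFI_FDML(i) = 0x44332211 and NFI_FDMM(i) = 0x88776655,
 * the sector's OOB bytes become 11 22 33 44 55 66 77 88 (the low register
 * holds bytes 0-3, the high register bytes 4-7, least significant byte
 * first).
 */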
static inline void mtk_nfc_write_fdm(struct nand_chip *chip)
{
struct mtk_nfc *nfc = nand_get_controller_data(chip);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
u32 vall, valm;
u8 *oobptr;
int i, j;
for (i = 0; i < chip->ecc.steps; i++) {
oobptr = oob_ptr(chip, i);
vall = 0;
valm = 0;
for (j = 0; j < 8; j++) {
if (j < 4)
vall |= (j < fdm->reg_size ? oobptr[j] : 0xff)
<< (j * 8);
else
valm |= (j < fdm->reg_size ? oobptr[j] : 0xff)
<< ((j - 4) * 8);
}
nfi_writel(nfc, vall, NFI_FDML(i));
nfi_writel(nfc, valm, NFI_FDMM(i));
}
}
static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const u8 *buf, int page, int len)
{
struct mtk_nfc *nfc = nand_get_controller_data(chip);
struct device *dev = nfc->dev;
dma_addr_t addr;
u32 reg;
int ret;
addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE);
ret = dma_mapping_error(nfc->dev, addr);
if (ret) {
dev_err(nfc->dev, "dma mapping error\n");
return -EINVAL;
}
reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
nfi_writew(nfc, reg, NFI_CNFG);
nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON);
nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
init_completion(&nfc->done);
reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
nfi_writel(nfc, reg, NFI_CON);
nfi_writew(nfc, STAR_EN, NFI_STRDATA);
ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
if (!ret) {
dev_err(dev, "program ahb done timeout\n");
nfi_writew(nfc, 0, NFI_INTR_EN);
ret = -ETIMEDOUT;
goto timeout;
}
ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
ADDRCNTR_SEC(reg) >= chip->ecc.steps,
10, MTK_TIMEOUT);
if (ret)
dev_err(dev, "hwecc write timeout\n");
timeout:
dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE);
nfi_writel(nfc, 0, NFI_CON);
return ret;
}
static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const u8 *buf, int page, int raw)
{
struct mtk_nfc *nfc = nand_get_controller_data(chip);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
size_t len;
const u8 *bufpoi;
u32 reg;
int ret;
mtk_nfc_select_target(chip, chip->cur_cs);
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
if (!raw) {
/* OOB => FDM: from register, ECC: from HW */
reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);
nfc->ecc_cfg.op = ECC_ENCODE;
nfc->ecc_cfg.mode = ECC_NFI_MODE;
ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
if (ret) {
/* clear NFI config */
reg = nfi_readw(nfc, NFI_CNFG);
reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
nfi_writew(nfc, reg, NFI_CNFG);
return ret;
}
memcpy(nfc->buffer, buf, mtd->writesize);
mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw);
bufpoi = nfc->buffer;
/* write OOB into the FDM registers (OOB area in MTK NAND) */
mtk_nfc_write_fdm(chip);
} else {
bufpoi = buf;
}
len = mtd->writesize + (raw ? mtd->oobsize : 0);
ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);
if (!raw)
mtk_ecc_disable(nfc->ecc);
if (ret)
return ret;
return nand_prog_page_end_op(chip);
}
static int mtk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
int oob_on, int page)
{
return mtk_nfc_write_page(nand_to_mtd(chip), chip, buf, page, 0);
}
static int mtk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
int oob_on, int pg)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mtk_nfc *nfc = nand_get_controller_data(chip);
mtk_nfc_format_page(mtd, buf);
return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
}
static int mtk_nfc_write_subpage_hwecc(struct nand_chip *chip, u32 offset,
u32 data_len, const u8 *buf,
int oob_on, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mtk_nfc *nfc = nand_get_controller_data(chip);
int ret;
ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf);
if (ret < 0)
return ret;
/* use the data in the private buffer (now with FDM and ECC parity) */
return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
}
static int mtk_nfc_write_oob_std(struct nand_chip *chip, int page)
{
return mtk_nfc_write_page_raw(chip, NULL, 1, page);
}
static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
u32 sectors)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mtk_nfc *nfc = nand_get_controller_data(chip);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
struct mtk_ecc_stats stats;
u32 reg_size = mtk_nand->fdm.reg_size;
int rc, i;
rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
if (rc) {
memset(buf, 0xff, sectors * chip->ecc.size);
for (i = 0; i < sectors; i++)
memset(oob_ptr(chip, start + i), 0xff, reg_size);
return 0;
}
mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
mtd->ecc_stats.corrected += stats.corrected;
mtd->ecc_stats.failed += stats.failed;
return stats.bitflips;
}
static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
u32 data_offs, u32 readlen,
u8 *bufpoi, int page, int raw)
{
struct mtk_nfc *nfc = nand_get_controller_data(chip);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
u32 spare = mtk_nand->spare_per_sector;
u32 column, sectors, start, end, reg;
dma_addr_t addr;
int bitflips = 0;
size_t len;
u8 *buf;
int rc;
mtk_nfc_select_target(chip, chip->cur_cs);
start = data_offs / chip->ecc.size;
end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
sectors = end - start;
column = start * (chip->ecc.size + spare);
len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
buf = bufpoi + start * chip->ecc.size;
nand_read_page_op(chip, page, column, NULL, 0);
addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
rc = dma_mapping_error(nfc->dev, addr);
if (rc) {
dev_err(nfc->dev, "dma mapping error\n");
return -EINVAL;
}
reg = nfi_readw(nfc, NFI_CNFG);
reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB;
if (!raw) {
reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
nfi_writew(nfc, reg, NFI_CNFG);
nfc->ecc_cfg.mode = ECC_NFI_MODE;
nfc->ecc_cfg.sectors = sectors;
nfc->ecc_cfg.op = ECC_DECODE;
rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
if (rc) {
dev_err(nfc->dev, "ecc enable\n");
/* clear NFI_CNFG */
reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
nfi_writew(nfc, reg, NFI_CNFG);
dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
return rc;
}
} else {
nfi_writew(nfc, reg, NFI_CNFG);
}
nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
init_completion(&nfc->done);
reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
nfi_writel(nfc, reg, NFI_CON);
nfi_writew(nfc, STAR_EN, NFI_STRDATA);
rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
if (!rc)
dev_warn(nfc->dev, "read ahb/dma done timeout\n");
rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
ADDRCNTR_SEC(reg) >= sectors, 10,
MTK_TIMEOUT);
if (rc < 0) {
dev_err(nfc->dev, "subpage done timeout\n");
bitflips = -EIO;
} else if (!raw) {
rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
bitflips = rc < 0 ? -ETIMEDOUT :
mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
mtk_nfc_read_fdm(chip, start, sectors);
}
dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
if (raw)
goto done;
mtk_ecc_disable(nfc->ecc);
if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw);
done:
nfi_writel(nfc, 0, NFI_CON);
return bitflips;
}
static int mtk_nfc_read_subpage_hwecc(struct nand_chip *chip, u32 off,
u32 len, u8 *p, int pg)
{
return mtk_nfc_read_subpage(nand_to_mtd(chip), chip, off, len, p, pg,
0);
}
static int mtk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *p, int oob_on,
int pg)
{
struct mtd_info *mtd = nand_to_mtd(chip);
return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
}
static int mtk_nfc_read_page_raw(struct nand_chip *chip, u8 *buf, int oob_on,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
struct mtk_nfc *nfc = nand_get_controller_data(chip);
struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
int i, ret;
memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer,
page, 1);
if (ret < 0)
return ret;
for (i = 0; i < chip->ecc.steps; i++) {
memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
if (i == mtk_nand->bad_mark.sec)
mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
if (buf)
memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
chip->ecc.size);
}
return ret;
}
static int mtk_nfc_read_oob_std(struct nand_chip *chip, int page)
{
return mtk_nfc_read_page_raw(chip, NULL, 1, page);
}
static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
{
/*
* CNRNB: nand ready/busy register
* -------------------------------
* 7:4: timeout register for polling the NAND busy/ready signal
* 0 : poll the status of the busy/ready signal after [7:4]*16 cycles.
*/
nfi_writew(nfc, 0xf1, NFI_CNRNB);
nfi_writel(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);
mtk_nfc_hw_reset(nfc);
nfi_readl(nfc, NFI_INTR_STA);
nfi_writel(nfc, 0, NFI_INTR_EN);
}
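/*
 * For example, the 0xf1 written above sets the timeout field (bits 7:4)
 * to 0xf and bit 0 to 1, i.e. the busy/ready signal is polled after
 * 0xf * 16 = 240 cycles.
 */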
static irqreturn_t mtk_nfc_irq(int irq, void *id)
{
struct mtk_nfc *nfc = id;
u16 sta, ien;
sta = nfi_readw(nfc, NFI_INTR_STA);
ien = nfi_readw(nfc, NFI_INTR_EN);
if (!(sta & ien))
return IRQ_NONE;
nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
complete(&nfc->done);
return IRQ_HANDLED;
}
static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oob_region)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
u32 eccsteps;
eccsteps = mtd->writesize / chip->ecc.size;
if (section >= eccsteps)
return -ERANGE;
oob_region->length = fdm->reg_size - fdm->ecc_size;
oob_region->offset = section * fdm->reg_size + fdm->ecc_size;
return 0;
}
static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oob_region)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
u32 eccsteps;
if (section)
return -ERANGE;
eccsteps = mtd->writesize / chip->ecc.size;
oob_region->offset = mtk_nand->fdm.reg_size * eccsteps;
oob_region->length = mtd->oobsize - oob_region->offset;
return 0;
}
static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
.free = mtk_nfc_ooblayout_free,
.ecc = mtk_nfc_ooblayout_ecc,
};
static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
struct mtk_nfc *nfc = nand_get_controller_data(nand);
u32 ecc_bytes;
ecc_bytes = DIV_ROUND_UP(nand->ecc.strength *
mtk_ecc_get_parity_bits(nfc->ecc), 8);
fdm->reg_size = chip->spare_per_sector - ecc_bytes;
if (fdm->reg_size > NFI_FDM_MAX_SIZE)
fdm->reg_size = NFI_FDM_MAX_SIZE;
/* bad block mark storage */
fdm->ecc_size = 1;
}
static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
if (mtd->writesize == 512) {
bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
} else {
bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
bm_ctl->sec = mtd->writesize / mtk_data_len(nand);
bm_ctl->pos = mtd->writesize % mtk_data_len(nand);
}
}
static int mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct mtk_nfc *nfc = nand_get_controller_data(nand);
const u8 *spare = nfc->caps->spare_size;
u32 eccsteps, i, closest_spare = 0;
eccsteps = mtd->writesize / nand->ecc.size;
*sps = mtd->oobsize / eccsteps;
if (nand->ecc.size == 1024)
*sps >>= 1;
if (*sps < MTK_NFC_MIN_SPARE)
return -EINVAL;
for (i = 0; i < nfc->caps->num_spare_size; i++) {
if (*sps >= spare[i] && spare[i] >= spare[closest_spare]) {
closest_spare = i;
if (*sps == spare[i])
break;
}
}
*sps = spare[closest_spare];
if (nand->ecc.size == 1024)
*sps <<= 1;
return 0;
}
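/*
 * Worked example (illustrative geometry): a 4 KiB page with a 128-byte
 * OOB and ecc.size = 1024 gives eccsteps = 4 and *sps = 32. Because
 * 1024-byte sectors are handled as pairs of 512-byte units, the value is
 * halved to 16 for the table lookup, which matches the smallest supported
 * spare size, and is then doubled back to 32 bytes of spare per 1024-byte
 * sector.
 */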
static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(&nand->base);
struct mtk_nfc *nfc = nand_get_controller_data(nand);
u32 spare;
int free, ret;
/* support only ecc hw mode */
if (nand->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
dev_err(dev, "ecc.engine_type not supported\n");
return -EINVAL;
}
/* if optional dt settings not present */
if (!nand->ecc.size || !nand->ecc.strength) {
/* use datasheet requirements */
nand->ecc.strength = requirements->strength;
nand->ecc.size = requirements->step_size;
/*
* align eccstrength and eccsize
* this controller only supports 512 and 1024 sizes
*/
if (nand->ecc.size < 1024) {
if (mtd->writesize > 512 &&
nfc->caps->max_sector_size > 512) {
nand->ecc.size = 1024;
nand->ecc.strength <<= 1;
} else {
nand->ecc.size = 512;
}
} else {
nand->ecc.size = 1024;
}
ret = mtk_nfc_set_spare_per_sector(&spare, mtd);
if (ret)
return ret;
/* calculate oob bytes except ecc parity data */
free = (nand->ecc.strength * mtk_ecc_get_parity_bits(nfc->ecc)
+ 7) >> 3;
free = spare - free;
/*
 * Increase the ECC strength if the OOB space left over is bigger than
 * the maximum FDM size, or reduce the ECC strength if the OOB size is
 * not large enough for the ECC parity data.
*/
if (free > NFI_FDM_MAX_SIZE) {
spare -= NFI_FDM_MAX_SIZE;
nand->ecc.strength = (spare << 3) /
mtk_ecc_get_parity_bits(nfc->ecc);
} else if (free < 0) {
spare -= NFI_FDM_MIN_SIZE;
nand->ecc.strength = (spare << 3) /
mtk_ecc_get_parity_bits(nfc->ecc);
}
}
mtk_ecc_adjust_strength(nfc->ecc, &nand->ecc.strength);
dev_info(dev, "eccsize %d eccstrength %d\n",
nand->ecc.size, nand->ecc.strength);
return 0;
}
static int mtk_nfc_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct device *dev = mtd->dev.parent;
struct mtk_nfc *nfc = nand_get_controller_data(chip);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
int len;
int ret;
if (chip->options & NAND_BUSWIDTH_16) {
dev_err(dev, "16bits buswidth not supported");
return -EINVAL;
}
/* store the BBT magic in the page, because the OOB is not protected */
if (chip->bbt_options & NAND_BBT_USE_FLASH)
chip->bbt_options |= NAND_BBT_NO_OOB;
ret = mtk_nfc_ecc_init(dev, mtd);
if (ret)
return ret;
ret = mtk_nfc_set_spare_per_sector(&mtk_nand->spare_per_sector, mtd);
if (ret)
return ret;
mtk_nfc_set_fdm(&mtk_nand->fdm, mtd);
mtk_nfc_set_bad_mark_ctl(&mtk_nand->bad_mark, mtd);
len = mtd->writesize + mtd->oobsize;
nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
if (!nfc->buffer)
return -ENOMEM;
return 0;
}
static const struct nand_controller_ops mtk_nfc_controller_ops = {
.attach_chip = mtk_nfc_attach_chip,
.setup_interface = mtk_nfc_setup_interface,
.exec_op = mtk_nfc_exec_op,
};
static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
struct device_node *np)
{
struct mtk_nfc_nand_chip *chip;
struct nand_chip *nand;
struct mtd_info *mtd;
int nsels;
u32 tmp;
int ret;
int i;
if (!of_get_property(np, "reg", &nsels))
return -ENODEV;
nsels /= sizeof(u32);
if (!nsels || nsels > MTK_NAND_MAX_NSELS) {
dev_err(dev, "invalid reg property size %d\n", nsels);
return -EINVAL;
}
chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8),
GFP_KERNEL);
if (!chip)
return -ENOMEM;
chip->nsels = nsels;
for (i = 0; i < nsels; i++) {
ret = of_property_read_u32_index(np, "reg", i, &tmp);
if (ret) {
dev_err(dev, "reg property failure : %d\n", ret);
return ret;
}
if (tmp >= MTK_NAND_MAX_NSELS) {
dev_err(dev, "invalid CS: %u\n", tmp);
return -EINVAL;
}
if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
dev_err(dev, "CS %u already assigned\n", tmp);
return -EINVAL;
}
chip->sels[i] = tmp;
}
nand = &chip->nand;
nand->controller = &nfc->controller;
nand_set_flash_node(nand, np);
nand_set_controller_data(nand, nfc);
nand->options |= NAND_USES_DMA | NAND_SUBPAGE_READ;
/* set default mode in case dt entry is missing */
nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
nand->ecc.write_page = mtk_nfc_write_page_hwecc;
nand->ecc.write_oob_raw = mtk_nfc_write_oob_std;
nand->ecc.write_oob = mtk_nfc_write_oob_std;
nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc;
nand->ecc.read_page_raw = mtk_nfc_read_page_raw;
nand->ecc.read_page = mtk_nfc_read_page_hwecc;
nand->ecc.read_oob_raw = mtk_nfc_read_oob_std;
nand->ecc.read_oob = mtk_nfc_read_oob_std;
mtd = nand_to_mtd(nand);
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
mtd->name = MTK_NAME;
mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);
mtk_nfc_hw_init(nfc);
ret = nand_scan(nand, nsels);
if (ret)
return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "mtd parse partition error\n");
nand_cleanup(nand);
return ret;
}
list_add_tail(&chip->node, &nfc->chips);
return 0;
}
static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
{
struct device_node *np = dev->of_node;
struct device_node *nand_np;
int ret;
for_each_child_of_node(np, nand_np) {
ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
if (ret) {
of_node_put(nand_np);
return ret;
}
}
return 0;
}
static const struct mtk_nfc_caps mtk_nfc_caps_mt2701 = {
.spare_size = spare_size_mt2701,
.num_spare_size = 16,
.pageformat_spare_shift = 4,
.nfi_clk_div = 1,
.max_sector = 16,
.max_sector_size = 1024,
};
static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
.spare_size = spare_size_mt2712,
.num_spare_size = 19,
.pageformat_spare_shift = 16,
.nfi_clk_div = 2,
.max_sector = 16,
.max_sector_size = 1024,
};
static const struct mtk_nfc_caps mtk_nfc_caps_mt7622 = {
.spare_size = spare_size_mt7622,
.num_spare_size = 4,
.pageformat_spare_shift = 4,
.nfi_clk_div = 1,
.max_sector = 8,
.max_sector_size = 512,
};
static const struct of_device_id mtk_nfc_id_table[] = {
{
.compatible = "mediatek,mt2701-nfc",
.data = &mtk_nfc_caps_mt2701,
}, {
.compatible = "mediatek,mt2712-nfc",
.data = &mtk_nfc_caps_mt2712,
}, {
.compatible = "mediatek,mt7622-nfc",
.data = &mtk_nfc_caps_mt7622,
},
{}
};
MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);
static int mtk_nfc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct mtk_nfc *nfc;
int ret, irq;
nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
if (!nfc)
return -ENOMEM;
nand_controller_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
nfc->controller.ops = &mtk_nfc_controller_ops;
/* probe defer if not ready */
nfc->ecc = of_mtk_ecc_get(np);
if (IS_ERR(nfc->ecc))
return PTR_ERR(nfc->ecc);
else if (!nfc->ecc)
return -ENODEV;
nfc->caps = of_device_get_match_data(dev);
nfc->dev = dev;
nfc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nfc->regs)) {
ret = PTR_ERR(nfc->regs);
goto release_ecc;
}
nfc->clk.nfi_clk = devm_clk_get_enabled(dev, "nfi_clk");
if (IS_ERR(nfc->clk.nfi_clk)) {
dev_err(dev, "no clk\n");
ret = PTR_ERR(nfc->clk.nfi_clk);
goto release_ecc;
}
nfc->clk.pad_clk = devm_clk_get_enabled(dev, "pad_clk");
if (IS_ERR(nfc->clk.pad_clk)) {
dev_err(dev, "no pad clk\n");
ret = PTR_ERR(nfc->clk.pad_clk);
goto release_ecc;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -EINVAL;
goto release_ecc;
}
ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
if (ret) {
dev_err(dev, "failed to request nfi irq\n");
goto release_ecc;
}
ret = dma_set_mask(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "failed to set dma mask\n");
goto release_ecc;
}
platform_set_drvdata(pdev, nfc);
ret = mtk_nfc_nand_chips_init(dev, nfc);
if (ret) {
dev_err(dev, "failed to init nand chips\n");
goto release_ecc;
}
return 0;
release_ecc:
mtk_ecc_release(nfc->ecc);
return ret;
}
static void mtk_nfc_remove(struct platform_device *pdev)
{
struct mtk_nfc *nfc = platform_get_drvdata(pdev);
struct mtk_nfc_nand_chip *mtk_chip;
struct nand_chip *chip;
int ret;
while (!list_empty(&nfc->chips)) {
mtk_chip = list_first_entry(&nfc->chips,
struct mtk_nfc_nand_chip, node);
chip = &mtk_chip->nand;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
list_del(&mtk_chip->node);
}
mtk_ecc_release(nfc->ecc);
}
#ifdef CONFIG_PM_SLEEP
static int mtk_nfc_suspend(struct device *dev)
{
struct mtk_nfc *nfc = dev_get_drvdata(dev);
clk_disable_unprepare(nfc->clk.nfi_clk);
clk_disable_unprepare(nfc->clk.pad_clk);
return 0;
}
static int mtk_nfc_resume(struct device *dev)
{
struct mtk_nfc *nfc = dev_get_drvdata(dev);
struct mtk_nfc_nand_chip *chip;
struct nand_chip *nand;
int ret;
u32 i;
udelay(200);
ret = clk_prepare_enable(nfc->clk.nfi_clk);
if (ret) {
dev_err(dev, "failed to enable nfi clk\n");
return ret;
}
ret = clk_prepare_enable(nfc->clk.pad_clk);
if (ret) {
dev_err(dev, "failed to enable pad clk\n");
clk_disable_unprepare(nfc->clk.nfi_clk);
return ret;
}
/* reset NAND chip if VCC was powered off */
list_for_each_entry(chip, &nfc->chips, node) {
nand = &chip->nand;
for (i = 0; i < chip->nsels; i++)
nand_reset(nand, i);
}
return 0;
}
static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
#endif
static struct platform_driver mtk_nfc_driver = {
.probe = mtk_nfc_probe,
.remove_new = mtk_nfc_remove,
.driver = {
.name = MTK_NAME,
.of_match_table = mtk_nfc_id_table,
#ifdef CONFIG_PM_SLEEP
.pm = &mtk_nfc_pm_ops,
#endif
},
};
module_platform_driver(mtk_nfc_driver);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_AUTHOR("Xiaolei Li <[email protected]>");
MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");
| linux-master | drivers/mtd/nand/raw/mtk_nand.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2009 - Maxim Levitsky
* driver for Ricoh xD readers
*/
#define DRV_NAME "r852"
#define pr_fmt(fmt) DRV_NAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/sched.h>
#include "sm_common.h"
#include "r852.h"
static bool r852_enable_dma = 1;
module_param(r852_enable_dma, bool, S_IRUGO);
MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
static int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
/* read register */
static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
{
uint8_t reg = readb(dev->mmio + address);
return reg;
}
/* write register */
static inline void r852_write_reg(struct r852_device *dev,
int address, uint8_t value)
{
writeb(value, dev->mmio + address);
}
/* read dword sized register */
static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
{
uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
return reg;
}
/* write dword sized register */
static inline void r852_write_reg_dword(struct r852_device *dev,
int address, uint32_t value)
{
writel(cpu_to_le32(value), dev->mmio + address);
}
/* returns pointer to our private structure */
static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
return nand_get_controller_data(chip);
}
/* check if controller supports dma */
static void r852_dma_test(struct r852_device *dev)
{
dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
(R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);
if (!dev->dma_usable)
message("Non dma capable device detected, dma disabled");
if (!r852_enable_dma) {
message("disabling dma on user request");
dev->dma_usable = 0;
}
}
/*
* Enable DMA. Enables either the first or the second stage of the DMA;
* expects dev->dma_dir and dev->dma_state to be set
*/
static void r852_dma_enable(struct r852_device *dev)
{
uint8_t dma_reg, dma_irq_reg;
/* Set up dma settings */
dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);
if (dev->dma_dir)
dma_reg |= R852_DMA_READ;
if (dev->dma_state == DMA_INTERNAL) {
dma_reg |= R852_DMA_INTERNAL;
/* Precaution to make sure HW doesn't write to random kernel memory */
r852_write_reg_dword(dev, R852_DMA_ADDR,
cpu_to_le32(dev->phys_bounce_buffer));
} else {
dma_reg |= R852_DMA_MEMORY;
r852_write_reg_dword(dev, R852_DMA_ADDR,
cpu_to_le32(dev->phys_dma_addr));
}
/* Precaution: make sure write reached the device */
r852_read_reg_dword(dev, R852_DMA_ADDR);
r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);
/* Set dma irq */
dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
dma_irq_reg |
R852_DMA_IRQ_INTERNAL |
R852_DMA_IRQ_ERROR |
R852_DMA_IRQ_MEMORY);
}
/*
* Disable DMA. Called from the interrupt handler, which reports the
* success of the operation via the 'error' argument
*/
static void r852_dma_done(struct r852_device *dev, int error)
{
WARN_ON(dev->dma_stage == 0);
r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
r852_read_reg_dword(dev, R852_DMA_IRQ_STA));
r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);
/* Precaution to make sure HW doesn't write to random kernel memory */
r852_write_reg_dword(dev, R852_DMA_ADDR,
cpu_to_le32(dev->phys_bounce_buffer));
r852_read_reg_dword(dev, R852_DMA_ADDR);
dev->dma_error = error;
dev->dma_stage = 0;
if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
dma_unmap_single(&dev->pci_dev->dev, dev->phys_dma_addr,
R852_DMA_LEN,
dev->dma_dir ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
/*
* Wait until the DMA transfer is done, which includes both phases of it
*/
static int r852_dma_wait(struct r852_device *dev)
{
long timeout = wait_for_completion_timeout(&dev->dma_done,
msecs_to_jiffies(1000));
if (!timeout) {
dbg("timeout waiting for DMA interrupt");
return -ETIMEDOUT;
}
return 0;
}
/*
* Read/write one page (512 bytes) using DMA. Only whole pages can be transferred this way
*/
static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
{
int bounce = 0;
unsigned long flags;
int error;
dev->dma_error = 0;
/* Set dma direction */
dev->dma_dir = do_read;
dev->dma_stage = 1;
reinit_completion(&dev->dma_done);
dbg_verbose("doing dma %s ", do_read ? "read" : "write");
/* Set the initial DMA state: for reads, first fill the on-board buffer
from the device; for writes, first fill the buffer from memory */
dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;
/* if incoming buffer is not page aligned, we should do bounce */
if ((unsigned long)buf & (R852_DMA_LEN-1))
bounce = 1;
if (!bounce) {
dev->phys_dma_addr = dma_map_single(&dev->pci_dev->dev, buf,
R852_DMA_LEN,
do_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
if (dma_mapping_error(&dev->pci_dev->dev, dev->phys_dma_addr))
bounce = 1;
}
if (bounce) {
dbg_verbose("dma: using bounce buffer");
dev->phys_dma_addr = dev->phys_bounce_buffer;
if (!do_read)
memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
}
/* Enable DMA */
spin_lock_irqsave(&dev->irqlock, flags);
r852_dma_enable(dev);
spin_unlock_irqrestore(&dev->irqlock, flags);
/* Wait till complete */
error = r852_dma_wait(dev);
if (error) {
r852_dma_done(dev, error);
return;
}
if (do_read && bounce)
memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
}
/*
* Program data lines of the nand chip to send data to it
*/
static void r852_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
uint32_t reg;
/* Don't allow any access to hardware if we suspect card removal */
if (dev->card_unstable)
return;
/* Special case for a whole sector write */
if (len == R852_DMA_LEN && dev->dma_usable) {
r852_do_dma(dev, (uint8_t *)buf, 0);
return;
}
/* write DWORD chunks - faster */
while (len >= 4) {
reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
r852_write_reg_dword(dev, R852_DATALINE, reg);
buf += 4;
len -= 4;
}
/* write rest */
while (len > 0) {
r852_write_reg(dev, R852_DATALINE, *buf++);
len--;
}
}
/*
* Read data lines of the nand chip to retrieve data
*/
static void r852_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
uint32_t reg;
if (dev->card_unstable) {
/* since we can't signal an error here, at least return a
predictable buffer */
memset(buf, 0, len);
return;
}
/* special case for whole sector read */
if (len == R852_DMA_LEN && dev->dma_usable) {
r852_do_dma(dev, buf, 1);
return;
}
/* read in dword sized chunks */
while (len >= 4) {
reg = r852_read_reg_dword(dev, R852_DATALINE);
*buf++ = reg & 0xFF;
*buf++ = (reg >> 8) & 0xFF;
*buf++ = (reg >> 16) & 0xFF;
*buf++ = (reg >> 24) & 0xFF;
len -= 4;
}
/* read the rest byte by byte */
while (len--)
*buf++ = r852_read_reg(dev, R852_DATALINE);
}
/*
* Read one byte from nand chip
*/
static uint8_t r852_read_byte(struct nand_chip *chip)
{
struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
/* Same problem as in r852_read_buf.... */
if (dev->card_unstable)
return 0;
return r852_read_reg(dev, R852_DATALINE);
}
/*
* Control several chip lines & send commands
*/
static void r852_cmdctl(struct nand_chip *chip, int dat, unsigned int ctrl)
{
struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
if (dev->card_unstable)
return;
if (ctrl & NAND_CTRL_CHANGE) {
dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
R852_CTL_ON | R852_CTL_CARDENABLE);
if (ctrl & NAND_ALE)
dev->ctlreg |= R852_CTL_DATA;
if (ctrl & NAND_CLE)
dev->ctlreg |= R852_CTL_COMMAND;
if (ctrl & NAND_NCE)
dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
else
dev->ctlreg &= ~R852_CTL_WRITE;
/* when a write is started, enable write access */
if (dat == NAND_CMD_ERASE1)
dev->ctlreg |= R852_CTL_WRITE;
r852_write_reg(dev, R852_CTL, dev->ctlreg);
}
/* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need
to set write mode */
if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
dev->ctlreg |= R852_CTL_WRITE;
r852_write_reg(dev, R852_CTL, dev->ctlreg);
}
if (dat != NAND_CMD_NONE)
r852_write_reg(dev, R852_DATALINE, dat);
}
/*
* Wait till the card is ready.
* Based on nand_wait, but also reports DMA errors via the returned status
*/
static int r852_wait(struct nand_chip *chip)
{
struct r852_device *dev = nand_get_controller_data(chip);
unsigned long timeout;
u8 status;
timeout = jiffies + msecs_to_jiffies(400);
while (time_before(jiffies, timeout))
if (chip->legacy.dev_ready(chip))
break;
nand_status_op(chip, &status);
/* Unfortunately, there is no way to report a detailed error status... */
if (dev->dma_error) {
status |= NAND_STATUS_FAIL;
dev->dma_error = 0;
}
return status;
}
/*
* Check if card is ready
*/
static int r852_ready(struct nand_chip *chip)
{
struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
}
/*
* Set ECC engine mode
*/
static void r852_ecc_hwctl(struct nand_chip *chip, int mode)
{
struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
if (dev->card_unstable)
return;
switch (mode) {
case NAND_ECC_READ:
case NAND_ECC_WRITE:
/* enable ecc generation/check*/
dev->ctlreg |= R852_CTL_ECC_ENABLE;
/* flush ecc buffer */
r852_write_reg(dev, R852_CTL,
dev->ctlreg | R852_CTL_ECC_ACCESS);
r852_read_reg_dword(dev, R852_DATALINE);
r852_write_reg(dev, R852_CTL, dev->ctlreg);
return;
case NAND_ECC_READSYN:
/* disable ecc generation */
dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
r852_write_reg(dev, R852_CTL, dev->ctlreg);
}
}
/*
* Calculate ECC, only used for writes
*/
static int r852_ecc_calculate(struct nand_chip *chip, const uint8_t *dat,
uint8_t *ecc_code)
{
struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
struct sm_oob *oob = (struct sm_oob *)ecc_code;
uint32_t ecc1, ecc2;
if (dev->card_unstable)
return 0;
dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
ecc2 = r852_read_reg_dword(dev, R852_DATALINE);
oob->ecc1[0] = (ecc1) & 0xFF;
oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
oob->ecc1[2] = (ecc1 >> 16) & 0xFF;
oob->ecc2[0] = (ecc2) & 0xFF;
oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
oob->ecc2[2] = (ecc2 >> 16) & 0xFF;
r852_write_reg(dev, R852_CTL, dev->ctlreg);
return 0;
}
/*
* Correct the data using ECC, hw did almost everything for us
*/
static int r852_ecc_correct(struct nand_chip *chip, uint8_t *dat,
uint8_t *read_ecc, uint8_t *calc_ecc)
{
uint32_t ecc_reg;
uint8_t ecc_status, err_byte;
int i, error = 0;
struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
if (dev->card_unstable)
return 0;
if (dev->dma_error) {
dev->dma_error = 0;
return -EIO;
}
r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
r852_write_reg(dev, R852_CTL, dev->ctlreg);
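/*
* Each 16-bit half of ecc_reg describes one 256-byte half of the
* sector: the low byte holds the error byte offset, the next byte the
* ECC status (fail/correctable flags and the error bit number).
*/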
for (i = 0 ; i <= 1 ; i++) {
ecc_status = (ecc_reg >> 8) & 0xFF;
/* ecc uncorrectable error */
if (ecc_status & R852_ECC_FAIL) {
dbg("ecc: unrecoverable error, in half %d", i);
error = -EBADMSG;
goto exit;
}
/* correctable error */
if (ecc_status & R852_ECC_CORRECTABLE) {
err_byte = ecc_reg & 0xFF;
dbg("ecc: recoverable error, "
"in half %d, byte %d, bit %d", i,
err_byte, ecc_status & R852_ECC_ERR_BIT_MSK);
dat[err_byte] ^=
1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
error++;
}
dat += 256;
ecc_reg >>= 16;
}
exit:
return error;
}
/*
* This is a copy of nand_read_oob_std.
* nand_read_oob_syndrome assumes we can send a column address - we can't
*/
static int r852_read_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/*
* Start the nand engine
*/
static void r852_engine_enable(struct r852_device *dev)
{
if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
} else {
r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
}
msleep(300);
r852_write_reg(dev, R852_CTL, 0);
}
/*
* Stop the nand engine
*/
static void r852_engine_disable(struct r852_device *dev)
{
r852_write_reg_dword(dev, R852_HW, 0);
r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
}
/*
* Test if card is present
*/
static void r852_card_update_present(struct r852_device *dev)
{
unsigned long flags;
uint8_t reg;
spin_lock_irqsave(&dev->irqlock, flags);
reg = r852_read_reg(dev, R852_CARD_STA);
dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
spin_unlock_irqrestore(&dev->irqlock, flags);
}
/*
* Update card detection IRQ state according to current card state
* which is read in r852_card_update_present
*/
static void r852_update_card_detect(struct r852_device *dev)
{
int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
dev->card_unstable = 0;
card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
card_detect_reg |= R852_CARD_IRQ_GENABLE;
card_detect_reg |= dev->card_detected ?
R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;
r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
}
static ssize_t media_type_show(struct device *sys_dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
struct r852_device *dev = r852_get_dev(mtd);
char *data = dev->sm ? "smartmedia" : "xd";
strcpy(buf, data);
return strlen(data);
}
static DEVICE_ATTR_RO(media_type);
/* Detect properties of card in slot */
static void r852_update_media_status(struct r852_device *dev)
{
uint8_t reg;
unsigned long flags;
int readonly;
spin_lock_irqsave(&dev->irqlock, flags);
if (!dev->card_detected) {
message("card removed");
spin_unlock_irqrestore(&dev->irqlock, flags);
return ;
}
readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
reg = r852_read_reg(dev, R852_DMA_CAP);
dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);
message("detected %s %s card in slot",
dev->sm ? "SmartMedia" : "xD",
readonly ? "readonly" : "writeable");
dev->readonly = readonly;
spin_unlock_irqrestore(&dev->irqlock, flags);
}
/*
* Register the nand device
* Called when the card is detected
*/
static int r852_register_nand_device(struct r852_device *dev)
{
struct mtd_info *mtd = nand_to_mtd(dev->chip);
WARN_ON(dev->card_registered);
mtd->dev.parent = &dev->pci_dev->dev;
if (dev->readonly)
dev->chip->options |= NAND_ROM;
r852_engine_enable(dev);
if (sm_register_device(mtd, dev->sm))
goto error1;
if (device_create_file(&mtd->dev, &dev_attr_media_type)) {
message("can't create media type sysfs attribute");
goto error3;
}
dev->card_registered = 1;
return 0;
error3:
WARN_ON(mtd_device_unregister(nand_to_mtd(dev->chip)));
nand_cleanup(dev->chip);
error1:
/* Force card redetect */
dev->card_detected = 0;
return -1;
}
/*
* Unregister the card
*/
static void r852_unregister_nand_device(struct r852_device *dev)
{
struct mtd_info *mtd = nand_to_mtd(dev->chip);
if (!dev->card_registered)
return;
device_remove_file(&mtd->dev, &dev_attr_media_type);
WARN_ON(mtd_device_unregister(mtd));
nand_cleanup(dev->chip);
r852_engine_disable(dev);
dev->card_registered = 0;
}
/* Card state updater */
static void r852_card_detect_work(struct work_struct *work)
{
struct r852_device *dev =
container_of(work, struct r852_device, card_detect_work.work);
r852_card_update_present(dev);
r852_update_card_detect(dev);
dev->card_unstable = 0;
/* False alarm */
if (dev->card_detected == dev->card_registered)
goto exit;
/* Read media properties */
r852_update_media_status(dev);
/* Register the card */
if (dev->card_detected)
r852_register_nand_device(dev);
else
r852_unregister_nand_device(dev);
exit:
r852_update_card_detect(dev);
}
/* Ack + disable IRQ generation */
static void r852_disable_irqs(struct r852_device *dev)
{
uint8_t reg;
reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);
reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
reg & ~R852_DMA_IRQ_MASK);
r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
}
/* Interrupt handler */
static irqreturn_t r852_irq(int irq, void *data)
{
struct r852_device *dev = (struct r852_device *)data;
uint8_t card_status, dma_status;
irqreturn_t ret = IRQ_NONE;
spin_lock(&dev->irqlock);
/* handle card detection interrupts first */
card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);
if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {
ret = IRQ_HANDLED;
dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);
/* we shouldn't receive any interrupts while we are waiting for
the card to settle */
WARN_ON(dev->card_unstable);
/* disable irqs while card is unstable */
/* this will time out an active DMA transfer, but better that than garbage */
r852_disable_irqs(dev);
if (dev->card_unstable)
goto out;
/* let the card state settle a bit, and then do the work */
dev->card_unstable = 1;
queue_delayed_work(dev->card_workqueue,
&dev->card_detect_work, msecs_to_jiffies(100));
goto out;
}
/* Handle dma interrupts */
dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);
if (dma_status & R852_DMA_IRQ_MASK) {
ret = IRQ_HANDLED;
if (dma_status & R852_DMA_IRQ_ERROR) {
dbg("received dma error IRQ");
r852_dma_done(dev, -EIO);
complete(&dev->dma_done);
goto out;
}
/* received DMA interrupt out of nowhere? */
WARN_ON_ONCE(dev->dma_stage == 0);
if (dev->dma_stage == 0)
goto out;
/* done device access */
if (dev->dma_state == DMA_INTERNAL &&
(dma_status & R852_DMA_IRQ_INTERNAL)) {
dev->dma_state = DMA_MEMORY;
dev->dma_stage++;
}
/* done memory DMA */
if (dev->dma_state == DMA_MEMORY &&
(dma_status & R852_DMA_IRQ_MEMORY)) {
dev->dma_state = DMA_INTERNAL;
dev->dma_stage++;
}
/* Enable 2nd half of dma dance */
if (dev->dma_stage == 2)
r852_dma_enable(dev);
/* Operation done */
if (dev->dma_stage == 3) {
r852_dma_done(dev, 0);
complete(&dev->dma_done);
}
goto out;
}
/* Handle unknown interrupts */
if (dma_status)
dbg("bad dma IRQ status = %x", dma_status);
if (card_status & ~R852_CARD_STA_CD)
dbg("strange card status = %x", card_status);
out:
spin_unlock(&dev->irqlock);
return ret;
}
static int r852_attach_chip(struct nand_chip *chip)
{
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return 0;
chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
chip->ecc.size = R852_DMA_LEN;
chip->ecc.bytes = SM_OOB_SIZE;
chip->ecc.strength = 2;
chip->ecc.hwctl = r852_ecc_hwctl;
chip->ecc.calculate = r852_ecc_calculate;
chip->ecc.correct = r852_ecc_correct;
/* TODO: hack */
chip->ecc.read_oob = r852_read_oob;
return 0;
}
static const struct nand_controller_ops r852_ops = {
.attach_chip = r852_attach_chip,
};
static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
int error;
struct nand_chip *chip;
struct r852_device *dev;
/* pci initialization */
error = pci_enable_device(pci_dev);
if (error)
goto error1;
pci_set_master(pci_dev);
error = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
if (error)
goto error2;
error = pci_request_regions(pci_dev, DRV_NAME);
if (error)
goto error3;
error = -ENOMEM;
/* init nand chip, but register it only on card insert */
chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
if (!chip)
goto error4;
/* commands */
chip->legacy.cmd_ctrl = r852_cmdctl;
chip->legacy.waitfunc = r852_wait;
chip->legacy.dev_ready = r852_ready;
/* I/O */
chip->legacy.read_byte = r852_read_byte;
chip->legacy.read_buf = r852_read_buf;
chip->legacy.write_buf = r852_write_buf;
/* init our device structure */
dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
if (!dev)
goto error5;
nand_set_controller_data(chip, dev);
dev->chip = chip;
dev->pci_dev = pci_dev;
pci_set_drvdata(pci_dev, dev);
nand_controller_init(&dev->controller);
dev->controller.ops = &r852_ops;
chip->controller = &dev->controller;
dev->bounce_buffer = dma_alloc_coherent(&pci_dev->dev, R852_DMA_LEN,
&dev->phys_bounce_buffer, GFP_KERNEL);
if (!dev->bounce_buffer)
goto error6;
error = -ENODEV;
dev->mmio = pci_ioremap_bar(pci_dev, 0);
if (!dev->mmio)
goto error7;
error = -ENOMEM;
dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
if (!dev->tmp_buffer)
goto error8;
init_completion(&dev->dma_done);
dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
if (!dev->card_workqueue)
goto error9;
INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);
/* shut down everything - precaution */
r852_engine_disable(dev);
r852_disable_irqs(dev);
r852_dma_test(dev);
dev->irq = pci_dev->irq;
spin_lock_init(&dev->irqlock);
dev->card_detected = 0;
r852_card_update_present(dev);
/* register irq handler */
error = -ENODEV;
if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
DRV_NAME, dev))
goto error10;
/* kick initial present test */
queue_delayed_work(dev->card_workqueue,
&dev->card_detect_work, 0);
pr_notice("driver loaded successfully\n");
return 0;
error10:
destroy_workqueue(dev->card_workqueue);
error9:
kfree(dev->tmp_buffer);
error8:
pci_iounmap(pci_dev, dev->mmio);
error7:
dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer,
dev->phys_bounce_buffer);
error6:
kfree(dev);
error5:
kfree(chip);
error4:
pci_release_regions(pci_dev);
error3:
error2:
pci_disable_device(pci_dev);
error1:
return error;
}
static void r852_remove(struct pci_dev *pci_dev)
{
struct r852_device *dev = pci_get_drvdata(pci_dev);
/* Stop detect workqueue -
we are going to unregister the device anyway*/
cancel_delayed_work_sync(&dev->card_detect_work);
destroy_workqueue(dev->card_workqueue);
/* Unregister the device, this might make more IO */
r852_unregister_nand_device(dev);
/* Stop interrupts */
r852_disable_irqs(dev);
free_irq(dev->irq, dev);
/* Cleanup */
kfree(dev->tmp_buffer);
pci_iounmap(pci_dev, dev->mmio);
dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer,
dev->phys_bounce_buffer);
kfree(dev->chip);
kfree(dev);
/* Shutdown the PCI device */
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
}
static void r852_shutdown(struct pci_dev *pci_dev)
{
struct r852_device *dev = pci_get_drvdata(pci_dev);
cancel_delayed_work_sync(&dev->card_detect_work);
r852_disable_irqs(dev);
synchronize_irq(dev->irq);
pci_disable_device(pci_dev);
}
#ifdef CONFIG_PM_SLEEP
static int r852_suspend(struct device *device)
{
struct r852_device *dev = dev_get_drvdata(device);
if (dev->ctlreg & R852_CTL_CARDENABLE)
return -EBUSY;
/* First make sure the detect work is gone */
cancel_delayed_work_sync(&dev->card_detect_work);
/* Turn off the interrupts and stop the device */
r852_disable_irqs(dev);
r852_engine_disable(dev);
/* If the card was pulled out just during the suspend, which is very
unlikely, we will remove it on resume; it's too late now
anyway... */
dev->card_unstable = 0;
return 0;
}
static int r852_resume(struct device *device)
{
struct r852_device *dev = dev_get_drvdata(device);
r852_disable_irqs(dev);
r852_card_update_present(dev);
r852_engine_disable(dev);
/* If card status changed, just do the work */
if (dev->card_detected != dev->card_registered) {
dbg("card was %s during low power state",
dev->card_detected ? "added" : "removed");
queue_delayed_work(dev->card_workqueue,
&dev->card_detect_work, msecs_to_jiffies(1000));
return 0;
}
/* Otherwise, initialize the card */
if (dev->card_registered) {
r852_engine_enable(dev);
nand_select_target(dev->chip, 0);
nand_reset_op(dev->chip);
nand_deselect_target(dev->chip);
}
/* Program card detection IRQ */
r852_update_card_detect(dev);
return 0;
}
#endif
static const struct pci_device_id r852_pci_id_tbl[] = {
{ PCI_VDEVICE(RICOH, 0x0852), },
{ },
};
MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
static struct pci_driver r852_pci_driver = {
.name = DRV_NAME,
.id_table = r852_pci_id_tbl,
.probe = r852_probe,
.remove = r852_remove,
.shutdown = r852_shutdown,
.driver.pm = &r852_pm_ops,
};
module_pci_driver(r852_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <[email protected]>");
MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
| linux-master | drivers/mtd/nand/raw/r852.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* TXx9 NAND flash memory controller driver
* Based on RBTX49xx patch from CELF patch archive.
*
* (C) Copyright TOSHIBA CORPORATION 2004-2007
* All Rights Reserved.
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/platform_data/txx9/ndfmc.h>
/* TXX9 NDFMC Registers */
#define TXX9_NDFDTR 0x00
#define TXX9_NDFMCR 0x04
#define TXX9_NDFSR 0x08
#define TXX9_NDFISR 0x0c
#define TXX9_NDFIMR 0x10
#define TXX9_NDFSPR 0x14
#define TXX9_NDFRSTR 0x18 /* not TX4939 */
/* NDFMCR : NDFMC Mode Control */
#define TXX9_NDFMCR_WE 0x80
#define TXX9_NDFMCR_ECC_ALL 0x60
#define TXX9_NDFMCR_ECC_RESET 0x60
#define TXX9_NDFMCR_ECC_READ 0x40
#define TXX9_NDFMCR_ECC_ON 0x20
#define TXX9_NDFMCR_ECC_OFF 0x00
#define TXX9_NDFMCR_CE 0x10
#define TXX9_NDFMCR_BSPRT 0x04 /* TX4925/TX4926 only */
#define TXX9_NDFMCR_ALE 0x02
#define TXX9_NDFMCR_CLE 0x01
/* TX4939 only */
#define TXX9_NDFMCR_X16 0x0400
#define TXX9_NDFMCR_DMAREQ_MASK 0x0300
#define TXX9_NDFMCR_DMAREQ_NODMA 0x0000
#define TXX9_NDFMCR_DMAREQ_128 0x0100
#define TXX9_NDFMCR_DMAREQ_256 0x0200
#define TXX9_NDFMCR_DMAREQ_512 0x0300
#define TXX9_NDFMCR_CS_MASK 0x0c
#define TXX9_NDFMCR_CS(ch) ((ch) << 2)
/* NDFMCR : NDFMC Status */
#define TXX9_NDFSR_BUSY 0x80
/* TX4939 only */
#define TXX9_NDFSR_DMARUN 0x40
/* NDFMCR : NDFMC Reset */
#define TXX9_NDFRSTR_RST 0x01
struct txx9ndfmc_priv {
struct platform_device *dev;
struct nand_chip chip;
int cs;
const char *mtdname;
};
#define MAX_TXX9NDFMC_DEV 4
struct txx9ndfmc_drvdata {
struct mtd_info *mtds[MAX_TXX9NDFMC_DEV];
void __iomem *base;
unsigned char hold; /* in gbusclock */
unsigned char spw; /* in gbusclock */
struct nand_controller controller;
};
static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct txx9ndfmc_priv *txx9_priv = nand_get_controller_data(chip);
return txx9_priv->dev;
}
static void __iomem *ndregaddr(struct platform_device *dev, unsigned int reg)
{
struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
return drvdata->base + (reg << plat->shift);
}
static u32 txx9ndfmc_read(struct platform_device *dev, unsigned int reg)
{
return __raw_readl(ndregaddr(dev, reg));
}
static void txx9ndfmc_write(struct platform_device *dev,
u32 val, unsigned int reg)
{
__raw_writel(val, ndregaddr(dev, reg));
}
static uint8_t txx9ndfmc_read_byte(struct nand_chip *chip)
{
struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
return txx9ndfmc_read(dev, TXX9_NDFDTR);
}
static void txx9ndfmc_write_buf(struct nand_chip *chip, const uint8_t *buf,
int len)
{
struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_WE, TXX9_NDFMCR);
while (len--)
__raw_writel(*buf++, ndfdtr);
txx9ndfmc_write(dev, mcr, TXX9_NDFMCR);
}
static void txx9ndfmc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
while (len--)
*buf++ = __raw_readl(ndfdtr);
}
static void txx9ndfmc_cmd_ctrl(struct nand_chip *chip, int cmd,
unsigned int ctrl)
{
struct txx9ndfmc_priv *txx9_priv = nand_get_controller_data(chip);
struct platform_device *dev = txx9_priv->dev;
struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
if (ctrl & NAND_CTRL_CHANGE) {
u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
mcr &= ~(TXX9_NDFMCR_CLE | TXX9_NDFMCR_ALE | TXX9_NDFMCR_CE);
mcr |= ctrl & NAND_CLE ? TXX9_NDFMCR_CLE : 0;
mcr |= ctrl & NAND_ALE ? TXX9_NDFMCR_ALE : 0;
/* TXX9_NDFMCR_CE bit is 0:high 1:low */
mcr |= ctrl & NAND_NCE ? TXX9_NDFMCR_CE : 0;
if (txx9_priv->cs >= 0 && (ctrl & NAND_NCE)) {
mcr &= ~TXX9_NDFMCR_CS_MASK;
mcr |= TXX9_NDFMCR_CS(txx9_priv->cs);
}
txx9ndfmc_write(dev, mcr, TXX9_NDFMCR);
}
if (cmd != NAND_CMD_NONE)
txx9ndfmc_write(dev, cmd & 0xff, TXX9_NDFDTR);
if (plat->flags & NDFMC_PLAT_FLAG_DUMMYWRITE) {
/* dummy write to update external latch */
if ((ctrl & NAND_CTRL_CHANGE) && cmd == NAND_CMD_NONE)
txx9ndfmc_write(dev, 0, TXX9_NDFDTR);
}
}
static int txx9ndfmc_dev_ready(struct nand_chip *chip)
{
struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
return !(txx9ndfmc_read(dev, TXX9_NDFSR) & TXX9_NDFSR_BUSY);
}
static int txx9ndfmc_calculate_ecc(struct nand_chip *chip, const uint8_t *dat,
uint8_t *ecc_code)
{
struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
int eccbytes;
u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
mcr &= ~TXX9_NDFMCR_ECC_ALL;
txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_READ, TXX9_NDFMCR);
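/*
* Read the hardware-generated ECC from the data register, 3 bytes at
* a time; note that the second byte of each triplet is output first.
*/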
for (eccbytes = chip->ecc.bytes; eccbytes > 0; eccbytes -= 3) {
ecc_code[1] = txx9ndfmc_read(dev, TXX9_NDFDTR);
ecc_code[0] = txx9ndfmc_read(dev, TXX9_NDFDTR);
ecc_code[2] = txx9ndfmc_read(dev, TXX9_NDFDTR);
ecc_code += 3;
}
txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
return 0;
}
static int txx9ndfmc_correct_data(struct nand_chip *chip, unsigned char *buf,
unsigned char *read_ecc,
unsigned char *calc_ecc)
{
int eccsize;
int corrected = 0;
int stat;
for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) {
stat = rawnand_sw_hamming_correct(chip, buf, read_ecc,
calc_ecc);
if (stat < 0)
return stat;
corrected += stat;
buf += 256;
read_ecc += 3;
calc_ecc += 3;
}
return corrected;
}
static void txx9ndfmc_enable_hwecc(struct nand_chip *chip, int mode)
{
struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
mcr &= ~TXX9_NDFMCR_ECC_ALL;
txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_RESET, TXX9_NDFMCR);
txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_ON, TXX9_NDFMCR);
}
static void txx9ndfmc_initialize(struct platform_device *dev)
{
struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
int tmout = 100;
if (plat->flags & NDFMC_PLAT_FLAG_NO_RSTR)
; /* no NDFRSTR. Write to NDFSPR resets the NDFMC. */
else {
/* reset NDFMC */
txx9ndfmc_write(dev,
txx9ndfmc_read(dev, TXX9_NDFRSTR) |
TXX9_NDFRSTR_RST,
TXX9_NDFRSTR);
while (txx9ndfmc_read(dev, TXX9_NDFRSTR) & TXX9_NDFRSTR_RST) {
if (--tmout == 0) {
dev_err(&dev->dev, "reset failed.\n");
break;
}
udelay(1);
}
}
/* setup Hold Time, Strobe Pulse Width */
txx9ndfmc_write(dev, (drvdata->hold << 4) | drvdata->spw, TXX9_NDFSPR);
txx9ndfmc_write(dev,
(plat->flags & NDFMC_PLAT_FLAG_USE_BSPRT) ?
TXX9_NDFMCR_BSPRT : 0, TXX9_NDFMCR);
}
#define TXX9NDFMC_NS_TO_CYC(gbusclk, ns) \
DIV_ROUND_UP((ns) * DIV_ROUND_UP(gbusclk, 1000), 1000000)
static int txx9ndfmc_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return 0;
chip->ecc.strength = 1;
if (mtd->writesize >= 512) {
chip->ecc.size = 512;
chip->ecc.bytes = 6;
} else {
chip->ecc.size = 256;
chip->ecc.bytes = 3;
}
chip->ecc.calculate = txx9ndfmc_calculate_ecc;
chip->ecc.correct = txx9ndfmc_correct_data;
chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
return 0;
}
static const struct nand_controller_ops txx9ndfmc_controller_ops = {
.attach_chip = txx9ndfmc_attach_chip,
};
static int __init txx9ndfmc_probe(struct platform_device *dev)
{
struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
int hold, spw;
int i;
struct txx9ndfmc_drvdata *drvdata;
unsigned long gbusclk = plat->gbus_clock;
drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->base = devm_platform_ioremap_resource(dev, 0);
if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
hold = plat->hold ?: 20; /* tDH */
spw = plat->spw ?: 90; /* max(tREADID, tWP, tRP) */
hold = TXX9NDFMC_NS_TO_CYC(gbusclk, hold);
spw = TXX9NDFMC_NS_TO_CYC(gbusclk, spw);
if (plat->flags & NDFMC_PLAT_FLAG_HOLDADD)
hold -= 2; /* actual hold time : (HOLD + 2) BUSCLK */
spw -= 1; /* actual wait time : (SPW + 1) BUSCLK */
hold = clamp(hold, 1, 15);
drvdata->hold = hold;
spw = clamp(spw, 1, 15);
drvdata->spw = spw;
dev_info(&dev->dev, "CLK:%ldMHz HOLD:%d SPW:%d\n",
(gbusclk + 500000) / 1000000, hold, spw);
nand_controller_init(&drvdata->controller);
drvdata->controller.ops = &txx9ndfmc_controller_ops;
platform_set_drvdata(dev, drvdata);
txx9ndfmc_initialize(dev);
for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
struct txx9ndfmc_priv *txx9_priv;
struct nand_chip *chip;
struct mtd_info *mtd;
if (!(plat->ch_mask & (1 << i)))
continue;
txx9_priv = kzalloc(sizeof(struct txx9ndfmc_priv),
GFP_KERNEL);
if (!txx9_priv)
continue;
chip = &txx9_priv->chip;
mtd = nand_to_mtd(chip);
mtd->dev.parent = &dev->dev;
chip->legacy.read_byte = txx9ndfmc_read_byte;
chip->legacy.read_buf = txx9ndfmc_read_buf;
chip->legacy.write_buf = txx9ndfmc_write_buf;
chip->legacy.cmd_ctrl = txx9ndfmc_cmd_ctrl;
chip->legacy.dev_ready = txx9ndfmc_dev_ready;
chip->legacy.chip_delay = 100;
chip->controller = &drvdata->controller;
nand_set_controller_data(chip, txx9_priv);
txx9_priv->dev = dev;
if (plat->ch_mask != 1) {
txx9_priv->cs = i;
txx9_priv->mtdname = kasprintf(GFP_KERNEL, "%s.%u",
dev_name(&dev->dev), i);
} else {
txx9_priv->cs = -1;
txx9_priv->mtdname = kstrdup(dev_name(&dev->dev),
GFP_KERNEL);
}
if (!txx9_priv->mtdname) {
kfree(txx9_priv);
dev_err(&dev->dev, "Unable to allocate MTD name.\n");
continue;
}
if (plat->wide_mask & (1 << i))
chip->options |= NAND_BUSWIDTH_16;
if (nand_scan(chip, 1)) {
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
continue;
}
mtd->name = txx9_priv->mtdname;
mtd_device_register(mtd, NULL, 0);
drvdata->mtds[i] = mtd;
}
return 0;
}
static int __exit txx9ndfmc_remove(struct platform_device *dev)
{
struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
int ret, i;
if (!drvdata)
return 0;
for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
struct mtd_info *mtd = drvdata->mtds[i];
struct nand_chip *chip;
struct txx9ndfmc_priv *txx9_priv;
if (!mtd)
continue;
chip = mtd_to_nand(mtd);
txx9_priv = nand_get_controller_data(chip);
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
return 0;
}
#ifdef CONFIG_PM
static int txx9ndfmc_resume(struct platform_device *dev)
{
if (platform_get_drvdata(dev))
txx9ndfmc_initialize(dev);
return 0;
}
#else
#define txx9ndfmc_resume NULL
#endif
static struct platform_driver txx9ndfmc_driver = {
.remove = __exit_p(txx9ndfmc_remove),
.resume = txx9ndfmc_resume,
.driver = {
.name = "txx9ndfmc",
},
};
module_platform_driver_probe(txx9ndfmc_driver, txx9ndfmc_probe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver");
MODULE_ALIAS("platform:txx9ndfmc");
| linux-master | drivers/mtd/nand/raw/txx9ndfmc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ST Microelectronics
* Flexible Static Memory Controller (FSMC)
* Driver for NAND portions
*
* Copyright © 2010 ST Microelectronics
* Vipin Kumar <[email protected]>
* Ashish Priyadarshi
*
* Based on drivers/mtd/nand/nomadik_nand.c (removed in v3.8)
* Copyright © 2007 STMicroelectronics Pvt. Ltd.
* Copyright © 2009 Alessandro Rubini
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/resource.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/amba/bus.h>
#include <mtd/mtd-abi.h>
/* fsmc controller registers for NOR flash */
#define CTRL 0x0
/* ctrl register definitions */
#define BANK_ENABLE BIT(0)
#define MUXED BIT(1)
#define NOR_DEV (2 << 2)
#define WIDTH_16 BIT(4)
#define RSTPWRDWN BIT(6)
#define WPROT BIT(7)
#define WRT_ENABLE BIT(12)
#define WAIT_ENB BIT(13)
#define CTRL_TIM 0x4
/* ctrl_tim register definitions */
#define FSMC_NOR_BANK_SZ 0x8
#define FSMC_NOR_REG_SIZE 0x40
#define FSMC_NOR_REG(base, bank, reg) ((base) + \
(FSMC_NOR_BANK_SZ * (bank)) + \
(reg))
/* fsmc controller registers for NAND flash */
#define FSMC_PC 0x00
/* pc register definitions */
#define FSMC_RESET BIT(0)
#define FSMC_WAITON BIT(1)
#define FSMC_ENABLE BIT(2)
#define FSMC_DEVTYPE_NAND BIT(3)
#define FSMC_DEVWID_16 BIT(4)
#define FSMC_ECCEN BIT(6)
#define FSMC_ECCPLEN_256 BIT(7)
#define FSMC_TCLR_SHIFT (9)
#define FSMC_TCLR_MASK (0xF)
#define FSMC_TAR_SHIFT (13)
#define FSMC_TAR_MASK (0xF)
#define STS 0x04
/* sts register definitions */
#define FSMC_CODE_RDY BIT(15)
#define COMM 0x08
/* comm register definitions */
#define FSMC_TSET_SHIFT 0
#define FSMC_TSET_MASK 0xFF
#define FSMC_TWAIT_SHIFT 8
#define FSMC_TWAIT_MASK 0xFF
#define FSMC_THOLD_SHIFT 16
#define FSMC_THOLD_MASK 0xFF
#define FSMC_THIZ_SHIFT 24
#define FSMC_THIZ_MASK 0xFF
#define ATTRIB 0x0C
#define IOATA 0x10
#define ECC1 0x14
#define ECC2 0x18
#define ECC3 0x1C
#define FSMC_NAND_BANK_SZ 0x20
#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
/*
* According to SPEAr300 Reference Manual (RM0082)
* TOUDEL = 7ns (Output delay from the flip-flops to the board)
* TINDEL = 5ns (Input delay from the board to the flipflop)
*/
#define TOUTDEL 7000
#define TINDEL 5000
struct fsmc_nand_timings {
u8 tclr;
u8 tar;
u8 thiz;
u8 thold;
u8 twait;
u8 tset;
};
enum access_mode {
USE_DMA_ACCESS = 1,
USE_WORD_ACCESS,
};
/**
* struct fsmc_nand_data - structure for FSMC NAND device state
*
* @base: Inherit from the nand_controller struct
* @pid: Part ID on the AMBA PrimeCell format
* @nand: Chip related info for a NAND flash.
*
* @bank: Bank number for probed device.
* @dev: Parent device
* @mode: Access mode
* @clk: Clock structure for FSMC.
*
* @read_dma_chan: DMA channel for read access
* @write_dma_chan: DMA channel for write access to NAND
* @dma_access_complete: Completion structure
*
* @dev_timings: NAND timings
*
* @data_pa: NAND Physical port for Data.
* @data_va: NAND port for Data.
* @cmd_va: NAND port for Command.
* @addr_va: NAND port for Address.
* @regs_va: Registers base address for a given bank.
*/
struct fsmc_nand_data {
struct nand_controller base;
u32 pid;
struct nand_chip nand;
unsigned int bank;
struct device *dev;
enum access_mode mode;
struct clk *clk;
/* DMA related objects */
struct dma_chan *read_dma_chan;
struct dma_chan *write_dma_chan;
struct completion dma_access_complete;
struct fsmc_nand_timings *dev_timings;
dma_addr_t data_pa;
void __iomem *data_va;
void __iomem *cmd_va;
void __iomem *addr_va;
void __iomem *regs_va;
};
static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (section >= chip->ecc.steps)
return -ERANGE;
oobregion->offset = (section * 16) + 2;
oobregion->length = 3;
return 0;
}
static int fsmc_ecc1_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (section >= chip->ecc.steps)
return -ERANGE;
oobregion->offset = (section * 16) + 8;
if (section < chip->ecc.steps - 1)
oobregion->length = 8;
else
oobregion->length = mtd->oobsize - oobregion->offset;
return 0;
}
static const struct mtd_ooblayout_ops fsmc_ecc1_ooblayout_ops = {
.ecc = fsmc_ecc1_ooblayout_ecc,
.free = fsmc_ecc1_ooblayout_free,
};
/*
* ECC placement definitions in oobfree type format.
* There are 13 bytes of ECC for every 512-byte block, and they have to be read
* consecutively and immediately after the 512-byte data block for the hardware to
* generate the error bit offsets within the 512 bytes of data.
*/
static int fsmc_ecc4_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (section >= chip->ecc.steps)
return -ERANGE;
oobregion->length = chip->ecc.bytes;
if (!section && mtd->writesize <= 512)
oobregion->offset = 0;
else
oobregion->offset = (section * 16) + 2;
return 0;
}
static int fsmc_ecc4_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (section >= chip->ecc.steps)
return -ERANGE;
oobregion->offset = (section * 16) + 15;
if (section < chip->ecc.steps - 1)
oobregion->length = 3;
else
oobregion->length = mtd->oobsize - oobregion->offset;
return 0;
}
static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = {
.ecc = fsmc_ecc4_ooblayout_ecc,
.free = fsmc_ecc4_ooblayout_free,
};
static inline struct fsmc_nand_data *nand_to_fsmc(struct nand_chip *chip)
{
return container_of(chip, struct fsmc_nand_data, nand);
}
/*
* fsmc_nand_setup - FSMC (Flexible Static Memory Controller) init routine
*
* This routine initializes timing parameters related to NAND memory access in
* FSMC registers
*/
static void fsmc_nand_setup(struct fsmc_nand_data *host,
struct fsmc_nand_timings *tims)
{
u32 value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
u32 tclr, tar, thiz, thold, twait, tset;
tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
if (host->nand.options & NAND_BUSWIDTH_16)
value |= FSMC_DEVWID_16;
writel_relaxed(value | tclr | tar, host->regs_va + FSMC_PC);
writel_relaxed(thiz | thold | twait | tset, host->regs_va + COMM);
writel_relaxed(thiz | thold | twait | tset, host->regs_va + ATTRIB);
}
static int fsmc_calc_timings(struct fsmc_nand_data *host,
const struct nand_sdr_timings *sdrt,
struct fsmc_nand_timings *tims)
{
unsigned long hclk = clk_get_rate(host->clk);
unsigned long hclkn = NSEC_PER_SEC / hclk;
u32 thiz, thold, twait, tset, twait_min;
if (sdrt->tRC_min < 30000)
return -EOPNOTSUPP;
tims->tar = DIV_ROUND_UP(sdrt->tAR_min / 1000, hclkn) - 1;
if (tims->tar > FSMC_TAR_MASK)
tims->tar = FSMC_TAR_MASK;
tims->tclr = DIV_ROUND_UP(sdrt->tCLR_min / 1000, hclkn) - 1;
if (tims->tclr > FSMC_TCLR_MASK)
tims->tclr = FSMC_TCLR_MASK;
thiz = sdrt->tCS_min - sdrt->tWP_min;
tims->thiz = DIV_ROUND_UP(thiz / 1000, hclkn);
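/* thold must cover the largest of tDH, tCH, tCLH, tWH, tALH and tREH */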
thold = sdrt->tDH_min;
if (thold < sdrt->tCH_min)
thold = sdrt->tCH_min;
if (thold < sdrt->tCLH_min)
thold = sdrt->tCLH_min;
if (thold < sdrt->tWH_min)
thold = sdrt->tWH_min;
if (thold < sdrt->tALH_min)
thold = sdrt->tALH_min;
if (thold < sdrt->tREH_min)
thold = sdrt->tREH_min;
tims->thold = DIV_ROUND_UP(thold / 1000, hclkn);
if (tims->thold == 0)
tims->thold = 1;
else if (tims->thold > FSMC_THOLD_MASK)
tims->thold = FSMC_THOLD_MASK;
tset = max(sdrt->tCS_min - sdrt->tWP_min,
sdrt->tCEA_max - sdrt->tREA_max);
tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1;
if (tims->tset == 0)
tims->tset = 1;
else if (tims->tset > FSMC_TSET_MASK)
tims->tset = FSMC_TSET_MASK;
/*
* According to SPEAr300 Reference Manual (RM0082) which gives more
* information related to FSMC timings than the SPEAr600 one (RM0305),
* twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL
*/
twait_min = sdrt->tCEA_max - ((tims->tset + 1) * hclkn * 1000)
+ TOUTDEL + TINDEL;
twait = max3(sdrt->tRP_min, sdrt->tWP_min, twait_min);
tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
if (tims->twait == 0)
tims->twait = 1;
else if (tims->twait > FSMC_TWAIT_MASK)
tims->twait = FSMC_TWAIT_MASK;
return 0;
}
static int fsmc_setup_interface(struct nand_chip *nand, int csline,
const struct nand_interface_config *conf)
{
struct fsmc_nand_data *host = nand_to_fsmc(nand);
struct fsmc_nand_timings tims;
const struct nand_sdr_timings *sdrt;
int ret;
sdrt = nand_get_sdr_timings(conf);
if (IS_ERR(sdrt))
return PTR_ERR(sdrt);
ret = fsmc_calc_timings(host, sdrt, &tims);
if (ret)
return ret;
if (csline == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
fsmc_nand_setup(host, &tims);
return 0;
}
/*
* fsmc_enable_hwecc - Enables Hardware ECC through FSMC registers
*/
static void fsmc_enable_hwecc(struct nand_chip *chip, int mode)
{
struct fsmc_nand_data *host = nand_to_fsmc(chip);
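/*
* Clear the 256-byte ECC page-length flag, then toggle ECCEN off and
* back on so the ECC computation starts fresh for the coming transfer.
*/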
writel_relaxed(readl(host->regs_va + FSMC_PC) & ~FSMC_ECCPLEN_256,
host->regs_va + FSMC_PC);
writel_relaxed(readl(host->regs_va + FSMC_PC) & ~FSMC_ECCEN,
host->regs_va + FSMC_PC);
writel_relaxed(readl(host->regs_va + FSMC_PC) | FSMC_ECCEN,
host->regs_va + FSMC_PC);
}
/*
* fsmc_read_hwecc_ecc4 - Hardware ECC calculator for ecc4 option supported by
* FSMC. ECC is 13 bytes for 512 bytes of data (supports error correction up to
* max of 8-bits)
*/
static int fsmc_read_hwecc_ecc4(struct nand_chip *chip, const u8 *data,
u8 *ecc)
{
struct fsmc_nand_data *host = nand_to_fsmc(chip);
u32 ecc_tmp;
unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
do {
if (readl_relaxed(host->regs_va + STS) & FSMC_CODE_RDY)
break;
cond_resched();
} while (!time_after_eq(jiffies, deadline));
if (time_after_eq(jiffies, deadline)) {
dev_err(host->dev, "calculate ecc timed out\n");
return -ETIMEDOUT;
}
ecc_tmp = readl_relaxed(host->regs_va + ECC1);
ecc[0] = ecc_tmp;
ecc[1] = ecc_tmp >> 8;
ecc[2] = ecc_tmp >> 16;
ecc[3] = ecc_tmp >> 24;
ecc_tmp = readl_relaxed(host->regs_va + ECC2);
ecc[4] = ecc_tmp;
ecc[5] = ecc_tmp >> 8;
ecc[6] = ecc_tmp >> 16;
ecc[7] = ecc_tmp >> 24;
ecc_tmp = readl_relaxed(host->regs_va + ECC3);
ecc[8] = ecc_tmp;
ecc[9] = ecc_tmp >> 8;
ecc[10] = ecc_tmp >> 16;
ecc[11] = ecc_tmp >> 24;
ecc_tmp = readl_relaxed(host->regs_va + STS);
ecc[12] = ecc_tmp >> 16;
return 0;
}
/*
* fsmc_read_hwecc_ecc1 - Hardware ECC calculator for ecc1 option supported by
* FSMC. ECC is 3 bytes for 512 bytes of data (supports error correction up to
* max of 1-bit)
*/
static int fsmc_read_hwecc_ecc1(struct nand_chip *chip, const u8 *data,
u8 *ecc)
{
struct fsmc_nand_data *host = nand_to_fsmc(chip);
u32 ecc_tmp;
ecc_tmp = readl_relaxed(host->regs_va + ECC1);
ecc[0] = ecc_tmp;
ecc[1] = ecc_tmp >> 8;
ecc[2] = ecc_tmp >> 16;
return 0;
}
static int fsmc_correct_ecc1(struct nand_chip *chip,
unsigned char *buf,
unsigned char *read_ecc,
unsigned char *calc_ecc)
{
bool sm_order = chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER;
return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
chip->ecc.size, sm_order);
}
/* Count the number of 0 bits in buff, up to a maximum of max_bits */
static int count_written_bits(u8 *buff, int size, int max_bits)
{
int k, written_bits = 0;
for (k = 0; k < size; k++) {
written_bits += hweight8(~buff[k]);
if (written_bits > max_bits)
break;
}
return written_bits;
}
static void dma_complete(void *param)
{
struct fsmc_nand_data *host = param;
complete(&host->dma_access_complete);
}
static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
enum dma_data_direction direction)
{
struct dma_chan *chan;
struct dma_device *dma_dev;
struct dma_async_tx_descriptor *tx;
dma_addr_t dma_dst, dma_src, dma_addr;
dma_cookie_t cookie;
unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
int ret;
unsigned long time_left;
if (direction == DMA_TO_DEVICE)
chan = host->write_dma_chan;
else if (direction == DMA_FROM_DEVICE)
chan = host->read_dma_chan;
else
return -EINVAL;
dma_dev = chan->device;
dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
if (direction == DMA_TO_DEVICE) {
dma_src = dma_addr;
dma_dst = host->data_pa;
} else {
dma_src = host->data_pa;
dma_dst = dma_addr;
}
tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
len, flags);
if (!tx) {
dev_err(host->dev, "device_prep_dma_memcpy error\n");
ret = -EIO;
goto unmap_dma;
}
tx->callback = dma_complete;
tx->callback_param = host;
cookie = tx->tx_submit(tx);
ret = dma_submit_error(cookie);
if (ret) {
dev_err(host->dev, "dma_submit_error %d\n", cookie);
goto unmap_dma;
}
dma_async_issue_pending(chan);
time_left =
wait_for_completion_timeout(&host->dma_access_complete,
msecs_to_jiffies(3000));
if (time_left == 0) {
dmaengine_terminate_all(chan);
dev_err(host->dev, "wait_for_completion_timeout\n");
ret = -ETIMEDOUT;
goto unmap_dma;
}
ret = 0;
unmap_dma:
dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
return ret;
}
/*
* fsmc_write_buf - write buffer to chip
* @host: FSMC NAND controller
* @buf: data buffer
* @len: number of bytes to write
*/
static void fsmc_write_buf(struct fsmc_nand_data *host, const u8 *buf,
int len)
{
int i;
if (IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
IS_ALIGNED(len, sizeof(u32))) {
u32 *p = (u32 *)buf;
len = len >> 2;
for (i = 0; i < len; i++)
writel_relaxed(p[i], host->data_va);
} else {
for (i = 0; i < len; i++)
writeb_relaxed(buf[i], host->data_va);
}
}
/*
* fsmc_read_buf - read chip data into buffer
* @host: FSMC NAND controller
* @buf: buffer to store data
* @len: number of bytes to read
*/
static void fsmc_read_buf(struct fsmc_nand_data *host, u8 *buf, int len)
{
int i;
if (IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
IS_ALIGNED(len, sizeof(u32))) {
u32 *p = (u32 *)buf;
len = len >> 2;
for (i = 0; i < len; i++)
p[i] = readl_relaxed(host->data_va);
} else {
for (i = 0; i < len; i++)
buf[i] = readb_relaxed(host->data_va);
}
}
/*
* fsmc_read_buf_dma - read chip data into buffer
* @host: FSMC NAND controller
* @buf: buffer to store data
* @len: number of bytes to read
*/
static void fsmc_read_buf_dma(struct fsmc_nand_data *host, u8 *buf,
int len)
{
dma_xfer(host, buf, len, DMA_FROM_DEVICE);
}
/*
* fsmc_write_buf_dma - write buffer to chip
* @host: FSMC NAND controller
* @buf: data buffer
* @len: number of bytes to write
*/
static void fsmc_write_buf_dma(struct fsmc_nand_data *host, const u8 *buf,
int len)
{
dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
}
/*
* fsmc_exec_op - hook called by the core to execute NAND operations
*
* This controller is simple enough and thus does not need to use the parser
* provided by the core; instead, every situation is handled here.
*/
static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
bool check_only)
{
struct fsmc_nand_data *host = nand_to_fsmc(chip);
const struct nand_op_instr *instr = NULL;
int ret = 0;
unsigned int op_id;
int i;
if (check_only)
return 0;
pr_debug("Executing operation [%d instructions]:\n", op->ninstrs);
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];
nand_op_trace(" ", instr);
switch (instr->type) {
case NAND_OP_CMD_INSTR:
writeb_relaxed(instr->ctx.cmd.opcode, host->cmd_va);
break;
case NAND_OP_ADDR_INSTR:
for (i = 0; i < instr->ctx.addr.naddrs; i++)
writeb_relaxed(instr->ctx.addr.addrs[i],
host->addr_va);
break;
case NAND_OP_DATA_IN_INSTR:
if (host->mode == USE_DMA_ACCESS)
fsmc_read_buf_dma(host, instr->ctx.data.buf.in,
instr->ctx.data.len);
else
fsmc_read_buf(host, instr->ctx.data.buf.in,
instr->ctx.data.len);
break;
case NAND_OP_DATA_OUT_INSTR:
if (host->mode == USE_DMA_ACCESS)
fsmc_write_buf_dma(host,
instr->ctx.data.buf.out,
instr->ctx.data.len);
else
fsmc_write_buf(host, instr->ctx.data.buf.out,
instr->ctx.data.len);
break;
case NAND_OP_WAITRDY_INSTR:
ret = nand_soft_waitrdy(chip,
instr->ctx.waitrdy.timeout_ms);
break;
}
if (instr->delay_ns)
ndelay(instr->delay_ns);
}
return ret;
}
/*
* fsmc_read_page_hwecc
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller expects OOB data read to chip->oob_poi
* @page: page number to read
*
* This routine is needed for FSMC version 8, as reading from the NAND chip has to be
* performed in a strict sequence as follows:
* data(512 byte) -> ecc(13 byte)
* After this read, fsmc hardware generates and reports error data bits(up to a
* max of 8 bits)
*/
static int fsmc_read_page_hwecc(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int i, j, s, stat, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
u8 *p = buf;
u8 *ecc_calc = chip->ecc.calc_buf;
u8 *ecc_code = chip->ecc.code_buf;
int off, len, ret, group = 0;
/*
* ecc_oob is intentionally taken as u16. In 16bit devices, we
* end up reading 14 bytes (7 words) from oob. The local array is
* to maintain word alignment
*/
u16 ecc_oob[7];
u8 *oob = (u8 *)&ecc_oob[0];
unsigned int max_bitflips = 0;
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
nand_read_page_op(chip, page, s * eccsize, NULL, 0);
chip->ecc.hwctl(chip, NAND_ECC_READ);
ret = nand_read_data_op(chip, p, eccsize, false, false);
if (ret)
return ret;
for (j = 0; j < eccbytes;) {
struct mtd_oob_region oobregion;
ret = mtd_ooblayout_ecc(mtd, group++, &oobregion);
if (ret)
return ret;
off = oobregion.offset;
len = oobregion.length;
/*
* The length is intentionally rounded up to a multiple of 2 so that
* at least 13 bytes are read even in the case of 16-bit NAND
* devices
*/
if (chip->options & NAND_BUSWIDTH_16)
len = roundup(len, 2);
nand_read_oob_op(chip, page, off, oob + j, len);
j += len;
}
memcpy(&ecc_code[i], oob, chip->ecc.bytes);
chip->ecc.calculate(chip, p, &ecc_calc[i]);
stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
if (stat < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += stat;
max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
}
return max_bitflips;
}
/*
* fsmc_bch8_correct_data
* @mtd: mtd info structure
* @dat: buffer of read data
* @read_ecc: ecc read from device spare area
* @calc_ecc: ecc calculated from read data
*
* calc_ecc is a 104 bit information containing maximum of 8 error
* offset information of 13 bits each in 512 bytes of read data.
*/
static int fsmc_bch8_correct_data(struct nand_chip *chip, u8 *dat,
u8 *read_ecc, u8 *calc_ecc)
{
struct fsmc_nand_data *host = nand_to_fsmc(chip);
u32 err_idx[8];
u32 num_err, i;
u32 ecc1, ecc2, ecc3, ecc4;
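/* the number of detected errors is reported in bits [13:10] of STS */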
num_err = (readl_relaxed(host->regs_va + STS) >> 10) & 0xF;
/* no bit flipping */
if (likely(num_err == 0))
return 0;
/* too many errors */
if (unlikely(num_err > 8)) {
/*
* This is a temporary erase check. A newly erased page read
* would result in an ecc error because the oob data is also
* erased to FF and the calculated ecc for an FF data is not
* FF..FF.
* This is a workaround to skip performing correction in case
* data is FF..FF
*
* Logic:
* For every page, each bit written as 0 is counted until these
* number of bits are greater than 8 (the maximum correction
* capability of FSMC for each 512 + 13 bytes)
*/
int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
int bits_data = count_written_bits(dat, chip->ecc.size, 8);
if ((bits_ecc + bits_data) <= 8) {
if (bits_data)
memset(dat, 0xff, chip->ecc.size);
return bits_data;
}
return -EBADMSG;
}
/*
* ------------------- calc_ecc[] bit wise -----------|--13 bits--|
* |---idx[7]--|--.....-----|---idx[2]--||---idx[1]--||---idx[0]--|
*
	 * The error-location information is 104 bits wide: up to eight 13-bit
	 * error offsets packed back to back. It is read out of the ECC1-ECC3
	 * and STS registers and the offsets are unpacked into the err_idx
	 * array.
*/
ecc1 = readl_relaxed(host->regs_va + ECC1);
ecc2 = readl_relaxed(host->regs_va + ECC2);
ecc3 = readl_relaxed(host->regs_va + ECC3);
ecc4 = readl_relaxed(host->regs_va + STS);
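	/*
	 * The eight 13-bit indexes are packed back to back across ECC1, ECC2,
	 * ECC3 and STS, so idx[2], idx[4] and idx[7] have to be stitched
	 * together from two adjacent registers.
	 */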
err_idx[0] = (ecc1 >> 0) & 0x1FFF;
err_idx[1] = (ecc1 >> 13) & 0x1FFF;
err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
err_idx[3] = (ecc2 >> 7) & 0x1FFF;
err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
err_idx[5] = (ecc3 >> 1) & 0x1FFF;
err_idx[6] = (ecc3 >> 14) & 0x1FFF;
err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);
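	/*
	 * Each index is a bit offset into the 512-byte sector. Offsets at or
	 * beyond chip->ecc.size * 8 presumably locate errors in the ECC bytes
	 * themselves, so no correction is applied to the data buffer for them.
	 */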
i = 0;
while (num_err--) {
err_idx[i] ^= 3;
if (err_idx[i] < chip->ecc.size * 8) {
int err = err_idx[i];
dat[err >> 3] ^= BIT(err & 7);
i++;
}
}
return i;
}
static bool filter(struct dma_chan *chan, void *slave)
{
chan->private = slave;
return true;
}
static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
struct fsmc_nand_data *host,
struct nand_chip *nand)
{
struct device_node *np = pdev->dev.of_node;
u32 val;
int ret;
nand->options = 0;
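	/* "bank-width" is given in bytes: 2 selects a 16-bit bus, 1 an 8-bit bus. */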
if (!of_property_read_u32(np, "bank-width", &val)) {
if (val == 2) {
nand->options |= NAND_BUSWIDTH_16;
} else if (val != 1) {
dev_err(&pdev->dev, "invalid bank-width %u\n", val);
return -EINVAL;
}
}
if (of_property_read_bool(np, "nand-skip-bbtscan"))
nand->options |= NAND_SKIP_BBTSCAN;
host->dev_timings = devm_kzalloc(&pdev->dev,
sizeof(*host->dev_timings),
GFP_KERNEL);
if (!host->dev_timings)
return -ENOMEM;
ret = of_property_read_u8_array(np, "timings", (u8 *)host->dev_timings,
sizeof(*host->dev_timings));
if (ret)
host->dev_timings = NULL;
/* Set default NAND bank to 0 */
host->bank = 0;
if (!of_property_read_u32(np, "bank", &val)) {
if (val > 3) {
dev_err(&pdev->dev, "invalid bank %u\n", val);
return -EINVAL;
}
host->bank = val;
}
return 0;
}
static int fsmc_nand_attach_chip(struct nand_chip *nand)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct fsmc_nand_data *host = nand_to_fsmc(nand);
if (nand->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
if (!nand->ecc.size)
nand->ecc.size = 512;
if (AMBA_REV_BITS(host->pid) >= 8) {
nand->ecc.read_page = fsmc_read_page_hwecc;
nand->ecc.calculate = fsmc_read_hwecc_ecc4;
nand->ecc.correct = fsmc_bch8_correct_data;
nand->ecc.bytes = 13;
nand->ecc.strength = 8;
}
if (AMBA_REV_BITS(host->pid) >= 8) {
switch (mtd->oobsize) {
case 16:
case 64:
case 128:
case 224:
case 256:
break;
default:
dev_warn(host->dev,
"No oob scheme defined for oobsize %d\n",
mtd->oobsize);
return -EINVAL;
}
mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);
return 0;
}
switch (nand->ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_ON_HOST:
dev_info(host->dev, "Using 1-bit HW ECC scheme\n");
nand->ecc.calculate = fsmc_read_hwecc_ecc1;
nand->ecc.correct = fsmc_correct_ecc1;
nand->ecc.hwctl = fsmc_enable_hwecc;
nand->ecc.bytes = 3;
nand->ecc.strength = 1;
nand->ecc.options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
break;
case NAND_ECC_ENGINE_TYPE_SOFT:
if (nand->ecc.algo == NAND_ECC_ALGO_BCH) {
dev_info(host->dev,
"Using 4-bit SW BCH ECC scheme\n");
break;
}
break;
case NAND_ECC_ENGINE_TYPE_ON_DIE:
break;
default:
dev_err(host->dev, "Unsupported ECC mode!\n");
return -ENOTSUPP;
}
/*
* Don't set layout for BCH4 SW ECC. This will be
* generated later during BCH initialization.
*/
if (nand->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
switch (mtd->oobsize) {
case 16:
case 64:
case 128:
mtd_set_ooblayout(mtd,
&fsmc_ecc1_ooblayout_ops);
break;
default:
dev_warn(host->dev,
"No oob scheme defined for oobsize %d\n",
mtd->oobsize);
return -EINVAL;
}
}
return 0;
}
static const struct nand_controller_ops fsmc_nand_controller_ops = {
.attach_chip = fsmc_nand_attach_chip,
.exec_op = fsmc_exec_op,
.setup_interface = fsmc_setup_interface,
};
/**
* fsmc_nand_disable() - Disables the NAND bank
* @host: The instance to disable
*/
static void fsmc_nand_disable(struct fsmc_nand_data *host)
{
u32 val;
val = readl(host->regs_va + FSMC_PC);
val &= ~FSMC_ENABLE;
writel(val, host->regs_va + FSMC_PC);
}
/*
* fsmc_nand_probe - Probe function
* @pdev: platform device structure
*/
static int __init fsmc_nand_probe(struct platform_device *pdev)
{
struct fsmc_nand_data *host;
struct mtd_info *mtd;
struct nand_chip *nand;
struct resource *res;
void __iomem *base;
dma_cap_mask_t mask;
int ret = 0;
u32 pid;
int i;
/* Allocate memory for the device structure (and zero it) */
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
nand = &host->nand;
ret = fsmc_nand_probe_config_dt(pdev, host, nand);
if (ret)
return ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
host->data_va = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(host->data_va))
return PTR_ERR(host->data_va);
host->data_pa = (dma_addr_t)res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
host->addr_va = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(host->addr_va))
return PTR_ERR(host->addr_va);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
host->cmd_va = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(host->cmd_va))
return PTR_ERR(host->cmd_va);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
host->regs_va = base + FSMC_NOR_REG_SIZE +
(host->bank * FSMC_NAND_BANK_SZ);
host->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "failed to fetch block clock\n");
return PTR_ERR(host->clk);
}
/*
* This device ID is actually a common AMBA ID as used on the
* AMBA PrimeCell bus. However it is not a PrimeCell.
*/
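	/*
	 * The four peripheral ID bytes live in the last 0x20 bytes of the
	 * register window, one byte per 32-bit word; assemble them LSB first.
	 */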
for (pid = 0, i = 0; i < 4; i++)
pid |= (readl(base + resource_size(res) - 0x20 + 4 * i) &
255) << (i * 8);
host->pid = pid;
dev_info(&pdev->dev,
"FSMC device partno %03x, manufacturer %02x, revision %02x, config %02x\n",
AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
host->dev = &pdev->dev;
if (host->mode == USE_DMA_ACCESS)
init_completion(&host->dma_access_complete);
/* Link all private pointers */
mtd = nand_to_mtd(&host->nand);
nand_set_flash_node(nand, pdev->dev.of_node);
mtd->dev.parent = &pdev->dev;
nand->badblockbits = 7;
if (host->mode == USE_DMA_ACCESS) {
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
host->read_dma_chan = dma_request_channel(mask, filter, NULL);
if (!host->read_dma_chan) {
dev_err(&pdev->dev, "Unable to get read dma channel\n");
ret = -ENODEV;
goto disable_fsmc;
}
host->write_dma_chan = dma_request_channel(mask, filter, NULL);
if (!host->write_dma_chan) {
dev_err(&pdev->dev, "Unable to get write dma channel\n");
ret = -ENODEV;
goto release_dma_read_chan;
}
}
if (host->dev_timings) {
fsmc_nand_setup(host, host->dev_timings);
nand->options |= NAND_KEEP_TIMINGS;
}
nand_controller_init(&host->base);
host->base.ops = &fsmc_nand_controller_ops;
nand->controller = &host->base;
/*
* Scan to find existence of the device
*/
ret = nand_scan(nand, 1);
if (ret)
goto release_dma_write_chan;
mtd->name = "nand";
ret = mtd_device_register(mtd, NULL, 0);
if (ret)
goto cleanup_nand;
platform_set_drvdata(pdev, host);
dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
return 0;
cleanup_nand:
nand_cleanup(nand);
release_dma_write_chan:
if (host->mode == USE_DMA_ACCESS)
dma_release_channel(host->write_dma_chan);
release_dma_read_chan:
if (host->mode == USE_DMA_ACCESS)
dma_release_channel(host->read_dma_chan);
disable_fsmc:
fsmc_nand_disable(host);
return ret;
}
/*
* Clean up routine
*/
static void fsmc_nand_remove(struct platform_device *pdev)
{
struct fsmc_nand_data *host = platform_get_drvdata(pdev);
if (host) {
struct nand_chip *chip = &host->nand;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
fsmc_nand_disable(host);
if (host->mode == USE_DMA_ACCESS) {
dma_release_channel(host->write_dma_chan);
dma_release_channel(host->read_dma_chan);
}
}
}
#ifdef CONFIG_PM_SLEEP
static int fsmc_nand_suspend(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
if (host)
clk_disable_unprepare(host->clk);
return 0;
}
static int fsmc_nand_resume(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
int ret;
if (host) {
ret = clk_prepare_enable(host->clk);
if (ret) {
dev_err(dev, "failed to enable clk\n");
return ret;
}
if (host->dev_timings)
fsmc_nand_setup(host, host->dev_timings);
nand_reset(&host->nand, 0);
}
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
static const struct of_device_id fsmc_nand_id_table[] = {
{ .compatible = "st,spear600-fsmc-nand" },
{ .compatible = "stericsson,fsmc-nand" },
{}
};
MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
static struct platform_driver fsmc_nand_driver = {
.remove_new = fsmc_nand_remove,
.driver = {
.name = "fsmc-nand",
.of_match_table = fsmc_nand_id_table,
.pm = &fsmc_nand_pm_ops,
},
};
module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vipin Kumar <[email protected]>, Ashish Priyadarshi");
MODULE_DESCRIPTION("NAND driver for SPEAr Platforms");
| linux-master | drivers/mtd/nand/raw/fsmc_nand.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2003 Red Hat, Inc.
* (C) 2004 Dan Brown <[email protected]>
* (C) 2004 Kalev Lember <[email protected]>
*
* Author: David Woodhouse <[email protected]>
* Additional Diskonchip 2000 and Millennium support by Dan Brown <[email protected]>
* Diskonchip Millennium Plus support by Kalev Lember <[email protected]>
*
* Error correction code lifted from the old docecc code
* Author: Fabrice Bellard ([email protected])
* Copyright (C) 2000 Netgem S.A.
* converted to the generic Reed-Solomon library by Thomas Gleixner <[email protected]>
*
* Interface to generic NAND code for M-Systems DiskOnChip devices
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rslib.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/doc2000.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/inftl.h>
#include <linux/module.h>
/* Where to look for the devices? */
#ifndef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS
#define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0
#endif
static unsigned long doc_locations[] __initdata = {
#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
#ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH
0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
#else
0xc8000, 0xca000, 0xcc000, 0xce000,
0xd0000, 0xd2000, 0xd4000, 0xd6000,
0xd8000, 0xda000, 0xdc000, 0xde000,
0xe0000, 0xe2000, 0xe4000, 0xe6000,
0xe8000, 0xea000, 0xec000, 0xee000,
#endif
#endif
0xffffffff };
static struct mtd_info *doclist = NULL;
struct doc_priv {
struct nand_controller base;
void __iomem *virtadr;
unsigned long physadr;
u_char ChipID;
u_char CDSNControl;
int chips_per_floor; /* The number of chips detected on each floor */
int curfloor;
int curchip;
int mh0_page;
int mh1_page;
struct rs_control *rs_decoder;
struct mtd_info *nextdoc;
bool supports_32b_reads;
/* Handle the last stage of initialization (BBT scan, partitioning) */
int (*late_init)(struct mtd_info *mtd);
};
/* This is the ecc value computed by the HW ecc generator upon writing an empty
page, one with all 0xff for data. */
static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 };
#define INFTL_BBT_RESERVED_BLOCKS 4
#define DoC_is_MillenniumPlus(doc) ((doc)->ChipID == DOC_ChipID_DocMilPlus16 || (doc)->ChipID == DOC_ChipID_DocMilPlus32)
#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
static int debug = 0;
module_param(debug, int, 0);
static int try_dword = 1;
module_param(try_dword, int, 0);
static int no_ecc_failures = 0;
module_param(no_ecc_failures, int, 0);
static int no_autopart = 0;
module_param(no_autopart, int, 0);
static int show_firmware_partition = 0;
module_param(show_firmware_partition, int, 0);
#ifdef CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE
static int inftl_bbt_write = 1;
#else
static int inftl_bbt_write = 0;
#endif
module_param(inftl_bbt_write, int, 0);
static unsigned long doc_config_location = CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS;
module_param(doc_config_location, ulong, 0);
MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
/* Sector size for HW ECC */
#define SECTOR_SIZE 512
/* The sector bytes are packed into NB_DATA 10 bit words */
#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / 10)
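/* i.e. (512 + 1) * 8 = 4104 payload bits, rounded up to 411 ten-bit symbols */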
/* Number of roots */
#define NROOTS 4
/* First consecutive root */
#define FCR 510
/* Number of symbols */
#define NN 1023
/*
 * The HW decoder in the DoC ASICs provides us with an error syndrome,
 * which we must convert to a standard syndrome usable by the generic
 * Reed-Solomon library code.
 *
 * Fabrice Bellard figured this out in the old docecc code. I added
 * some comments, made minor improvements and converted it to make use
 * of the generic Reed-Solomon library. tglx
*/
static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
{
int i, j, nerr, errpos[8];
uint8_t parity;
uint16_t ds[4], s[5], tmp, errval[8], syn[4];
struct rs_codec *cd = rs->codec;
memset(syn, 0, sizeof(syn));
/* Convert the ecc bytes into words */
ds[0] = ((ecc[4] & 0xff) >> 0) | ((ecc[5] & 0x03) << 8);
ds[1] = ((ecc[5] & 0xfc) >> 2) | ((ecc[2] & 0x0f) << 6);
ds[2] = ((ecc[2] & 0xf0) >> 4) | ((ecc[3] & 0x3f) << 4);
ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2);
parity = ecc[1];
/* Initialize the syndrome buffer */
for (i = 0; i < NROOTS; i++)
s[i] = ds[0];
/*
* Evaluate
* s[i] = ds[3]x^3 + ds[2]x^2 + ds[1]x^1 + ds[0]
* where x = alpha^(FCR + i)
*/
for (j = 1; j < NROOTS; j++) {
if (ds[j] == 0)
continue;
tmp = cd->index_of[ds[j]];
for (i = 0; i < NROOTS; i++)
s[i] ^= cd->alpha_to[rs_modnn(cd, tmp + (FCR + i) * j)];
}
/* Calc syn[i] = s[i] / alpha^(v + i) */
for (i = 0; i < NROOTS; i++) {
if (s[i])
syn[i] = rs_modnn(cd, cd->index_of[s[i]] + (NN - FCR - i));
}
/* Call the decoder library */
nerr = decode_rs16(rs, NULL, NULL, 1019, syn, 0, errpos, 0, errval);
	/* Uncorrectable errors? */
if (nerr < 0)
return nerr;
/*
	 * Correct the errors. The bit positions look like magic, but they
	 * are given by the design of the de/encoder circuit in the DoC
	 * ASICs.
*/
for (i = 0; i < nerr; i++) {
int index, bitpos, pos = 1015 - errpos[i];
uint8_t val;
if (pos >= NB_DATA && pos < 1019)
continue;
if (pos < NB_DATA) {
/* extract bit position (MSB first) */
pos = 10 * (NB_DATA - 1 - pos) - 6;
/* now correct the following 10 bits. At most two bytes
can be modified since pos is even */
index = (pos >> 3) ^ 1;
bitpos = pos & 7;
if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
val = (uint8_t) (errval[i] >> (2 + bitpos));
parity ^= val;
if (index < SECTOR_SIZE)
data[index] ^= val;
}
index = ((pos >> 3) + 1) ^ 1;
bitpos = (bitpos + 10) & 7;
if (bitpos == 0)
bitpos = 8;
if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
val = (uint8_t) (errval[i] << (8 - bitpos));
parity ^= val;
if (index < SECTOR_SIZE)
data[index] ^= val;
}
}
}
/* If the parity is wrong, no rescue possible */
return parity ? -EBADMSG : nerr;
}
static void DoC_Delay(struct doc_priv *doc, unsigned short cycles)
{
volatile char __always_unused dummy;
int i;
for (i = 0; i < cycles; i++) {
if (DoC_is_Millennium(doc))
dummy = ReadDOC(doc->virtadr, NOP);
else if (DoC_is_MillenniumPlus(doc))
dummy = ReadDOC(doc->virtadr, Mplus_NOP);
else
dummy = ReadDOC(doc->virtadr, DOCStatus);
}
}
#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
static int _DoC_WaitReady(struct doc_priv *doc)
{
void __iomem *docptr = doc->virtadr;
unsigned long timeo = jiffies + (HZ * 10);
if (debug)
printk("_DoC_WaitReady...\n");
/* Out-of-line routine to wait for chip response */
if (DoC_is_MillenniumPlus(doc)) {
while ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
if (time_after(jiffies, timeo)) {
printk("_DoC_WaitReady timed out.\n");
return -EIO;
}
udelay(1);
cond_resched();
}
} else {
while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
if (time_after(jiffies, timeo)) {
printk("_DoC_WaitReady timed out.\n");
return -EIO;
}
udelay(1);
cond_resched();
}
}
return 0;
}
static inline int DoC_WaitReady(struct doc_priv *doc)
{
void __iomem *docptr = doc->virtadr;
int ret = 0;
if (DoC_is_MillenniumPlus(doc)) {
DoC_Delay(doc, 4);
if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
/* Call the out-of-line routine to wait */
ret = _DoC_WaitReady(doc);
} else {
DoC_Delay(doc, 4);
if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
/* Call the out-of-line routine to wait */
ret = _DoC_WaitReady(doc);
DoC_Delay(doc, 2);
}
if (debug)
printk("DoC_WaitReady OK\n");
return ret;
}
static void doc2000_write_byte(struct nand_chip *this, u_char datum)
{
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
if (debug)
printk("write_byte %02x\n", datum);
WriteDOC(datum, docptr, CDSNSlowIO);
WriteDOC(datum, docptr, 2k_CDSN_IO);
}
static void doc2000_writebuf(struct nand_chip *this, const u_char *buf,
int len)
{
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
int i;
if (debug)
printk("writebuf of %d bytes: ", len);
for (i = 0; i < len; i++) {
WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i);
if (debug && i < 16)
printk("%02x ", buf[i]);
}
if (debug)
printk("\n");
}
static void doc2000_readbuf(struct nand_chip *this, u_char *buf, int len)
{
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
u32 *buf32 = (u32 *)buf;
int i;
if (debug)
printk("readbuf of %d bytes: ", len);
if (!doc->supports_32b_reads ||
((((unsigned long)buf) | len) & 3)) {
for (i = 0; i < len; i++)
buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
} else {
for (i = 0; i < len / 4; i++)
buf32[i] = readl(docptr + DoC_2k_CDSN_IO + i);
}
}
/*
* We need our own readid() here because it's called before the NAND chip
 * has been initialized, and calling nand_op_readid() would lead to a NULL
 * pointer dereference when accessing the NAND timings.
*/
static void doc200x_readid(struct nand_chip *this, unsigned int cs, u8 *id)
{
u8 addr = 0;
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_READID, 0),
NAND_OP_ADDR(1, &addr, 50),
NAND_OP_8BIT_DATA_IN(2, id, 0),
};
struct nand_operation op = NAND_OPERATION(cs, instrs);
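	/*
	 * Without an id buffer, drop the trailing DATA_IN instruction so that
	 * only the READID command and the address cycle are issued (used for
	 * the raw 32-bit probe in doc200x_ident_chip()).
	 */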
if (!id)
op.ninstrs--;
this->controller->ops->exec_op(this, &op, false);
}
static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
{
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
uint16_t ret;
u8 id[2];
doc200x_readid(this, nr, id);
ret = ((u16)id[0] << 8) | id[1];
if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) {
/* First chip probe. See if we get same results by 32-bit access */
union {
uint32_t dword;
uint8_t byte[4];
} ident;
void __iomem *docptr = doc->virtadr;
doc200x_readid(this, nr, NULL);
ident.dword = readl(docptr + DoC_2k_CDSN_IO);
if (((ident.byte[0] << 8) | ident.byte[1]) == ret) {
pr_info("DiskOnChip 2000 responds to DWORD access\n");
doc->supports_32b_reads = true;
}
}
return ret;
}
static void __init doc2000_count_chips(struct mtd_info *mtd)
{
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
uint16_t mfrid;
int i;
/* Max 4 chips per floor on DiskOnChip 2000 */
doc->chips_per_floor = 4;
/* Find out what the first chip is */
mfrid = doc200x_ident_chip(mtd, 0);
/* Find how many chips in each floor. */
for (i = 1; i < 4; i++) {
if (doc200x_ident_chip(mtd, i) != mfrid)
break;
}
doc->chips_per_floor = i;
pr_debug("Detected %d chips per floor.\n", i);
}
static void doc2001_write_byte(struct nand_chip *this, u_char datum)
{
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
WriteDOC(datum, docptr, CDSNSlowIO);
WriteDOC(datum, docptr, Mil_CDSN_IO);
WriteDOC(datum, docptr, WritePipeTerm);
}
static void doc2001_writebuf(struct nand_chip *this, const u_char *buf, int len)
{
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
int i;
for (i = 0; i < len; i++)
WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
/* Terminate write pipeline */
WriteDOC(0x00, docptr, WritePipeTerm);
}
static void doc2001_readbuf(struct nand_chip *this, u_char *buf, int len)
{
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
int i;
/* Start read pipeline */
ReadDOC(docptr, ReadPipeInit);
for (i = 0; i < len - 1; i++)
buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
/* Terminate read pipeline */
buf[i] = ReadDOC(docptr, LastDataRead);
}
static void doc2001plus_writebuf(struct nand_chip *this, const u_char *buf, int len)
{
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
int i;
if (debug)
printk("writebuf of %d bytes: ", len);
for (i = 0; i < len; i++) {
WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
if (debug && i < 16)
printk("%02x ", buf[i]);
}
if (debug)
printk("\n");
}
static void doc2001plus_readbuf(struct nand_chip *this, u_char *buf, int len)
{
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
int i;
if (debug)
printk("readbuf of %d bytes: ", len);
/* Start read pipeline */
ReadDOC(docptr, Mplus_ReadPipeInit);
ReadDOC(docptr, Mplus_ReadPipeInit);
for (i = 0; i < len - 2; i++) {
buf[i] = ReadDOC(docptr, Mil_CDSN_IO);
if (debug && i < 16)
printk("%02x ", buf[i]);
}
/* Terminate read pipeline */
if (len >= 2) {
buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead);
if (debug && i < 16)
printk("%02x ", buf[len - 2]);
}
buf[len - 1] = ReadDOC(docptr, Mplus_LastDataRead);
if (debug && i < 16)
printk("%02x ", buf[len - 1]);
if (debug)
printk("\n");
}
static void doc200x_write_control(struct doc_priv *doc, u8 value)
{
WriteDOC(value, doc->virtadr, CDSNControl);
	/* 11.4.3 -- 4 NOPs after CDSNControl write */
DoC_Delay(doc, 4);
}
static void doc200x_exec_instr(struct nand_chip *this,
const struct nand_op_instr *instr)
{
struct doc_priv *doc = nand_get_controller_data(this);
unsigned int i;
switch (instr->type) {
case NAND_OP_CMD_INSTR:
doc200x_write_control(doc, CDSN_CTRL_CE | CDSN_CTRL_CLE);
doc2000_write_byte(this, instr->ctx.cmd.opcode);
break;
case NAND_OP_ADDR_INSTR:
doc200x_write_control(doc, CDSN_CTRL_CE | CDSN_CTRL_ALE);
for (i = 0; i < instr->ctx.addr.naddrs; i++) {
u8 addr = instr->ctx.addr.addrs[i];
if (DoC_is_2000(doc))
doc2000_write_byte(this, addr);
else
doc2001_write_byte(this, addr);
}
break;
case NAND_OP_DATA_IN_INSTR:
doc200x_write_control(doc, CDSN_CTRL_CE);
if (DoC_is_2000(doc))
doc2000_readbuf(this, instr->ctx.data.buf.in,
instr->ctx.data.len);
else
doc2001_readbuf(this, instr->ctx.data.buf.in,
instr->ctx.data.len);
break;
case NAND_OP_DATA_OUT_INSTR:
doc200x_write_control(doc, CDSN_CTRL_CE);
if (DoC_is_2000(doc))
doc2000_writebuf(this, instr->ctx.data.buf.out,
instr->ctx.data.len);
else
doc2001_writebuf(this, instr->ctx.data.buf.out,
instr->ctx.data.len);
break;
case NAND_OP_WAITRDY_INSTR:
DoC_WaitReady(doc);
break;
}
if (instr->delay_ns)
ndelay(instr->delay_ns);
}
static int doc200x_exec_op(struct nand_chip *this,
const struct nand_operation *op,
bool check_only)
{
struct doc_priv *doc = nand_get_controller_data(this);
unsigned int i;
if (check_only)
return true;
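	/* Map the linear chip-select number onto (floor, chip) coordinates. */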
doc->curchip = op->cs % doc->chips_per_floor;
doc->curfloor = op->cs / doc->chips_per_floor;
WriteDOC(doc->curfloor, doc->virtadr, FloorSelect);
WriteDOC(doc->curchip, doc->virtadr, CDSNDeviceSelect);
/* Assert CE pin */
doc200x_write_control(doc, CDSN_CTRL_CE);
for (i = 0; i < op->ninstrs; i++)
doc200x_exec_instr(this, &op->instrs[i]);
/* De-assert CE pin */
doc200x_write_control(doc, 0);
return 0;
}
static void doc2001plus_write_pipe_term(struct doc_priv *doc)
{
WriteDOC(0x00, doc->virtadr, Mplus_WritePipeTerm);
WriteDOC(0x00, doc->virtadr, Mplus_WritePipeTerm);
}
static void doc2001plus_exec_instr(struct nand_chip *this,
const struct nand_op_instr *instr)
{
struct doc_priv *doc = nand_get_controller_data(this);
unsigned int i;
switch (instr->type) {
case NAND_OP_CMD_INSTR:
WriteDOC(instr->ctx.cmd.opcode, doc->virtadr, Mplus_FlashCmd);
doc2001plus_write_pipe_term(doc);
break;
case NAND_OP_ADDR_INSTR:
for (i = 0; i < instr->ctx.addr.naddrs; i++) {
u8 addr = instr->ctx.addr.addrs[i];
WriteDOC(addr, doc->virtadr, Mplus_FlashAddress);
}
doc2001plus_write_pipe_term(doc);
/* deassert ALE */
WriteDOC(0, doc->virtadr, Mplus_FlashControl);
break;
case NAND_OP_DATA_IN_INSTR:
doc2001plus_readbuf(this, instr->ctx.data.buf.in,
instr->ctx.data.len);
break;
case NAND_OP_DATA_OUT_INSTR:
doc2001plus_writebuf(this, instr->ctx.data.buf.out,
instr->ctx.data.len);
doc2001plus_write_pipe_term(doc);
break;
case NAND_OP_WAITRDY_INSTR:
DoC_WaitReady(doc);
break;
}
if (instr->delay_ns)
ndelay(instr->delay_ns);
}
static int doc2001plus_exec_op(struct nand_chip *this,
const struct nand_operation *op,
bool check_only)
{
struct doc_priv *doc = nand_get_controller_data(this);
unsigned int i;
if (check_only)
return true;
doc->curchip = op->cs % doc->chips_per_floor;
doc->curfloor = op->cs / doc->chips_per_floor;
/* Assert ChipEnable and deassert WriteProtect */
WriteDOC(DOC_FLASH_CE, doc->virtadr, Mplus_FlashSelect);
for (i = 0; i < op->ninstrs; i++)
doc2001plus_exec_instr(this, &op->instrs[i]);
/* De-assert ChipEnable */
WriteDOC(0, doc->virtadr, Mplus_FlashSelect);
return 0;
}
static void doc200x_enable_hwecc(struct nand_chip *this, int mode)
{
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
/* Prime the ECC engine */
switch (mode) {
case NAND_ECC_READ:
WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
WriteDOC(DOC_ECC_EN, docptr, ECCConf);
break;
case NAND_ECC_WRITE:
WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
break;
}
}
static void doc2001plus_enable_hwecc(struct nand_chip *this, int mode)
{
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
/* Prime the ECC engine */
switch (mode) {
case NAND_ECC_READ:
WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
break;
case NAND_ECC_WRITE:
WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
break;
}
}
/* This code is only called on write */
static int doc200x_calculate_ecc(struct nand_chip *this, const u_char *dat,
unsigned char *ecc_code)
{
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
int i;
int __always_unused emptymatch = 1;
/* flush the pipeline */
if (DoC_is_2000(doc)) {
WriteDOC(doc->CDSNControl & ~CDSN_CTRL_FLASH_IO, docptr, CDSNControl);
WriteDOC(0, docptr, 2k_CDSN_IO);
WriteDOC(0, docptr, 2k_CDSN_IO);
WriteDOC(0, docptr, 2k_CDSN_IO);
WriteDOC(doc->CDSNControl, docptr, CDSNControl);
} else if (DoC_is_MillenniumPlus(doc)) {
WriteDOC(0, docptr, Mplus_NOP);
WriteDOC(0, docptr, Mplus_NOP);
WriteDOC(0, docptr, Mplus_NOP);
} else {
WriteDOC(0, docptr, NOP);
WriteDOC(0, docptr, NOP);
WriteDOC(0, docptr, NOP);
}
for (i = 0; i < 6; i++) {
if (DoC_is_MillenniumPlus(doc))
ecc_code[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
else
ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
if (ecc_code[i] != empty_write_ecc[i])
emptymatch = 0;
}
if (DoC_is_MillenniumPlus(doc))
WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
else
WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
#if 0
/* If emptymatch=1, we might have an all-0xff data buffer. Check. */
if (emptymatch) {
/* Note: this somewhat expensive test should not be triggered
often. It could be optimized away by examining the data in
the writebuf routine, and remembering the result. */
for (i = 0; i < 512; i++) {
if (dat[i] == 0xff)
continue;
emptymatch = 0;
break;
}
}
/* If emptymatch still =1, we do have an all-0xff data buffer.
Return all-0xff ecc value instead of the computed one, so
it'll look just like a freshly-erased page. */
if (emptymatch)
memset(ecc_code, 0xff, 6);
#endif
return 0;
}
static int doc200x_correct_data(struct nand_chip *this, u_char *dat,
u_char *read_ecc, u_char *isnull)
{
int i, ret = 0;
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
uint8_t calc_ecc[6];
volatile u_char dummy;
/* flush the pipeline */
if (DoC_is_2000(doc)) {
dummy = ReadDOC(docptr, 2k_ECCStatus);
dummy = ReadDOC(docptr, 2k_ECCStatus);
dummy = ReadDOC(docptr, 2k_ECCStatus);
} else if (DoC_is_MillenniumPlus(doc)) {
dummy = ReadDOC(docptr, Mplus_ECCConf);
dummy = ReadDOC(docptr, Mplus_ECCConf);
dummy = ReadDOC(docptr, Mplus_ECCConf);
} else {
dummy = ReadDOC(docptr, ECCConf);
dummy = ReadDOC(docptr, ECCConf);
dummy = ReadDOC(docptr, ECCConf);
}
/* Error occurred ? */
if (dummy & 0x80) {
for (i = 0; i < 6; i++) {
if (DoC_is_MillenniumPlus(doc))
calc_ecc[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
else
calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
}
ret = doc_ecc_decode(doc->rs_decoder, dat, calc_ecc);
if (ret > 0)
pr_err("doc200x_correct_data corrected %d errors\n",
ret);
}
if (DoC_is_MillenniumPlus(doc))
WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
else
WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
if (no_ecc_failures && mtd_is_eccerr(ret)) {
pr_err("suppressing ECC failure\n");
ret = 0;
}
return ret;
}
//u_char mydatabuf[528];
static int doc200x_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section)
return -ERANGE;
oobregion->offset = 0;
oobregion->length = 6;
return 0;
}
static int doc200x_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section > 1)
return -ERANGE;
/*
* The strange out-of-order free bytes definition is a (possibly
* unneeded) attempt to retain compatibility. It used to read:
* .oobfree = { {8, 8} }
* Since that leaves two bytes unusable, it was changed. But the
* following scheme might affect existing jffs2 installs by moving the
* cleanmarker:
* .oobfree = { {6, 10} }
* jffs2 seems to handle the above gracefully, but the current scheme
* seems safer. The only problem with it is that any code retrieving
* free bytes position must be able to handle out-of-order segments.
*/
if (!section) {
oobregion->offset = 8;
oobregion->length = 8;
} else {
oobregion->offset = 6;
oobregion->length = 2;
}
return 0;
}
static const struct mtd_ooblayout_ops doc200x_ooblayout_ops = {
.ecc = doc200x_ooblayout_ecc,
.free = doc200x_ooblayout_free,
};
/* Find the (I)NFTL Media Header, and optionally also the mirror media header.
On successful return, buf will contain a copy of the media header for
further processing. id is the string to scan for, and will presumably be
either "ANAND" or "BNAND". If findmirror=1, also look for the mirror media
header. The page #s of the found media headers are placed in mh0_page and
mh1_page in the DOC private structure. */
static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const char *id, int findmirror)
{
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
unsigned offs;
int ret;
size_t retlen;
for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
if (retlen != mtd->writesize)
continue;
if (ret) {
pr_warn("ECC error scanning DOC at 0x%x\n", offs);
}
if (memcmp(buf, id, 6))
continue;
pr_info("Found DiskOnChip %s Media Header at 0x%x\n", id, offs);
if (doc->mh0_page == -1) {
doc->mh0_page = offs >> this->page_shift;
if (!findmirror)
return 1;
continue;
}
doc->mh1_page = offs >> this->page_shift;
return 2;
}
if (doc->mh0_page == -1) {
pr_warn("DiskOnChip %s Media Header not found.\n", id);
return 0;
}
	/* Only one media header was found. We want buf to contain a
	   media header on return, so we'll have to re-read the one we found. */
offs = doc->mh0_page << this->page_shift;
ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
if (retlen != mtd->writesize) {
/* Insanity. Give up. */
pr_err("Read DiskOnChip Media Header once, but can't reread it???\n");
return 0;
}
return 1;
}
static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
{
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
struct nand_memory_organization *memorg;
int ret = 0;
u_char *buf;
struct NFTLMediaHeader *mh;
const unsigned psize = 1 << this->page_shift;
int numparts = 0;
unsigned blocks, maxblocks;
int offs, numheaders;
memorg = nanddev_get_memorg(&this->base);
buf = kmalloc(mtd->writesize, GFP_KERNEL);
if (!buf) {
return 0;
}
if (!(numheaders = find_media_headers(mtd, buf, "ANAND", 1)))
goto out;
mh = (struct NFTLMediaHeader *)buf;
le16_to_cpus(&mh->NumEraseUnits);
le16_to_cpus(&mh->FirstPhysicalEUN);
le32_to_cpus(&mh->FormattedSize);
pr_info(" DataOrgID = %s\n"
" NumEraseUnits = %d\n"
" FirstPhysicalEUN = %d\n"
" FormattedSize = %d\n"
" UnitSizeFactor = %d\n",
mh->DataOrgID, mh->NumEraseUnits,
mh->FirstPhysicalEUN, mh->FormattedSize,
mh->UnitSizeFactor);
blocks = mtd->size >> this->phys_erase_shift;
maxblocks = min(32768U, mtd->erasesize - psize);
if (mh->UnitSizeFactor == 0x00) {
/* Auto-determine UnitSizeFactor. The constraints are:
- There can be at most 32768 virtual blocks.
- There can be at most (virtual block size - page size)
virtual blocks (because MediaHeader+BBT must fit in 1).
*/
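		/*
		 * Each iteration of the loop below doubles the virtual erase
		 * size (halving the block count) and decrements UnitSizeFactor
		 * accordingly; 0xff means a multiplier of 1.
		 */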
mh->UnitSizeFactor = 0xff;
while (blocks > maxblocks) {
blocks >>= 1;
maxblocks = min(32768U, (maxblocks << 1) + psize);
mh->UnitSizeFactor--;
}
pr_warn("UnitSizeFactor=0x00 detected. Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor);
}
/* NOTE: The lines below modify internal variables of the NAND and MTD
	   layers; variables which have already been configured by nand_scan.
Unfortunately, we didn't know before this point what these values
should be. Thus, this code is somewhat dependent on the exact
implementation of the NAND layer. */
if (mh->UnitSizeFactor != 0xff) {
this->bbt_erase_shift += (0xff - mh->UnitSizeFactor);
memorg->pages_per_eraseblock <<= (0xff - mh->UnitSizeFactor);
mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
pr_info("Setting virtual erase size to %d\n", mtd->erasesize);
blocks = mtd->size >> this->bbt_erase_shift;
maxblocks = min(32768U, mtd->erasesize - psize);
}
if (blocks > maxblocks) {
pr_err("UnitSizeFactor of 0x%02x is inconsistent with device size. Aborting.\n", mh->UnitSizeFactor);
goto out;
}
/* Skip past the media headers. */
offs = max(doc->mh0_page, doc->mh1_page);
offs <<= this->page_shift;
offs += mtd->erasesize;
if (show_firmware_partition == 1) {
parts[0].name = " DiskOnChip Firmware / Media Header partition";
parts[0].offset = 0;
parts[0].size = offs;
numparts = 1;
}
parts[numparts].name = " DiskOnChip BDTL partition";
parts[numparts].offset = offs;
parts[numparts].size = (mh->NumEraseUnits - numheaders) << this->bbt_erase_shift;
offs += parts[numparts].size;
numparts++;
if (offs < mtd->size) {
parts[numparts].name = " DiskOnChip Remainder partition";
parts[numparts].offset = offs;
parts[numparts].size = mtd->size - offs;
numparts++;
}
ret = numparts;
out:
kfree(buf);
return ret;
}
/* This is a stripped-down copy of the code in inftlmount.c */
static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
{
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
int ret = 0;
u_char *buf;
struct INFTLMediaHeader *mh;
struct INFTLPartition *ip;
int numparts = 0;
int blocks;
int vshift, lastvunit = 0;
int i;
int end = mtd->size;
if (inftl_bbt_write)
end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift);
buf = kmalloc(mtd->writesize, GFP_KERNEL);
if (!buf) {
return 0;
}
if (!find_media_headers(mtd, buf, "BNAND", 0))
goto out;
doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift);
mh = (struct INFTLMediaHeader *)buf;
le32_to_cpus(&mh->NoOfBootImageBlocks);
le32_to_cpus(&mh->NoOfBinaryPartitions);
le32_to_cpus(&mh->NoOfBDTLPartitions);
le32_to_cpus(&mh->BlockMultiplierBits);
le32_to_cpus(&mh->FormatFlags);
le32_to_cpus(&mh->PercentUsed);
pr_info(" bootRecordID = %s\n"
" NoOfBootImageBlocks = %d\n"
" NoOfBinaryPartitions = %d\n"
" NoOfBDTLPartitions = %d\n"
" BlockMultiplierBits = %d\n"
		" FormatFlags = %d\n"
" OsakVersion = %d.%d.%d.%d\n"
" PercentUsed = %d\n",
mh->bootRecordID, mh->NoOfBootImageBlocks,
mh->NoOfBinaryPartitions,
mh->NoOfBDTLPartitions,
mh->BlockMultiplierBits, mh->FormatFlags,
((unsigned char *) &mh->OsakVersion)[0] & 0xf,
((unsigned char *) &mh->OsakVersion)[1] & 0xf,
((unsigned char *) &mh->OsakVersion)[2] & 0xf,
((unsigned char *) &mh->OsakVersion)[3] & 0xf,
mh->PercentUsed);
vshift = this->phys_erase_shift + mh->BlockMultiplierBits;
blocks = mtd->size >> vshift;
if (blocks > 32768) {
pr_err("BlockMultiplierBits=%d is inconsistent with device size. Aborting.\n", mh->BlockMultiplierBits);
goto out;
}
blocks = doc->chips_per_floor << (this->chip_shift - this->phys_erase_shift);
if (inftl_bbt_write && (blocks > mtd->erasesize)) {
pr_err("Writeable BBTs spanning more than one erase block are not yet supported. FIX ME!\n");
goto out;
}
/* Scan the partitions */
for (i = 0; (i < 4); i++) {
ip = &(mh->Partitions[i]);
le32_to_cpus(&ip->virtualUnits);
le32_to_cpus(&ip->firstUnit);
le32_to_cpus(&ip->lastUnit);
le32_to_cpus(&ip->flags);
le32_to_cpus(&ip->spareUnits);
le32_to_cpus(&ip->Reserved0);
pr_info(" PARTITION[%d] ->\n"
" virtualUnits = %d\n"
" firstUnit = %d\n"
" lastUnit = %d\n"
" flags = 0x%x\n"
" spareUnits = %d\n",
i, ip->virtualUnits, ip->firstUnit,
ip->lastUnit, ip->flags,
ip->spareUnits);
if ((show_firmware_partition == 1) &&
(i == 0) && (ip->firstUnit > 0)) {
parts[0].name = " DiskOnChip IPL / Media Header partition";
parts[0].offset = 0;
parts[0].size = mtd->erasesize * ip->firstUnit;
numparts = 1;
}
if (ip->flags & INFTL_BINARY)
parts[numparts].name = " DiskOnChip BDK partition";
else
parts[numparts].name = " DiskOnChip BDTL partition";
parts[numparts].offset = ip->firstUnit << vshift;
parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift;
numparts++;
if (ip->lastUnit > lastvunit)
lastvunit = ip->lastUnit;
if (ip->flags & INFTL_LAST)
break;
}
lastvunit++;
if ((lastvunit << vshift) < end) {
parts[numparts].name = " DiskOnChip Remainder partition";
parts[numparts].offset = lastvunit << vshift;
parts[numparts].size = end - parts[numparts].offset;
numparts++;
}
ret = numparts;
out:
kfree(buf);
return ret;
}
static int __init nftl_scan_bbt(struct mtd_info *mtd)
{
int ret, numparts;
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
struct mtd_partition parts[2];
memset((char *)parts, 0, sizeof(parts));
/* On NFTL, we have to find the media headers before we can read the
BBTs, since they're stored in the media header eraseblocks. */
numparts = nftl_partscan(mtd, parts);
if (!numparts)
return -EIO;
this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
NAND_BBT_VERSION;
this->bbt_td->veroffs = 7;
this->bbt_td->pages[0] = doc->mh0_page + 1;
if (doc->mh1_page != -1) {
this->bbt_md->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
NAND_BBT_VERSION;
this->bbt_md->veroffs = 7;
this->bbt_md->pages[0] = doc->mh1_page + 1;
} else {
this->bbt_md = NULL;
}
ret = nand_create_bbt(this);
if (ret)
return ret;
return mtd_device_register(mtd, parts, no_autopart ? 0 : numparts);
}
static int __init inftl_scan_bbt(struct mtd_info *mtd)
{
int ret, numparts;
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
struct mtd_partition parts[5];
if (nanddev_ntargets(&this->base) > doc->chips_per_floor) {
pr_err("Multi-floor INFTL devices not yet supported.\n");
return -EIO;
}
if (DoC_is_MillenniumPlus(doc)) {
this->bbt_td->options = NAND_BBT_2BIT | NAND_BBT_ABSPAGE;
if (inftl_bbt_write)
this->bbt_td->options |= NAND_BBT_WRITE;
this->bbt_td->pages[0] = 2;
this->bbt_md = NULL;
} else {
this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
if (inftl_bbt_write)
this->bbt_td->options |= NAND_BBT_WRITE;
this->bbt_td->offs = 8;
this->bbt_td->len = 8;
this->bbt_td->veroffs = 7;
this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
this->bbt_td->reserved_block_code = 0x01;
this->bbt_td->pattern = "MSYS_BBT";
this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
if (inftl_bbt_write)
this->bbt_md->options |= NAND_BBT_WRITE;
this->bbt_md->offs = 8;
this->bbt_md->len = 8;
this->bbt_md->veroffs = 7;
this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
this->bbt_md->reserved_block_code = 0x01;
this->bbt_md->pattern = "TBB_SYSM";
}
ret = nand_create_bbt(this);
if (ret)
return ret;
memset((char *)parts, 0, sizeof(parts));
numparts = inftl_partscan(mtd, parts);
/* At least for now, require the INFTL Media Header. We could probably
do without it for non-INFTL use, since all it gives us is
autopartitioning, but I want to give it more thought. */
if (!numparts)
return -EIO;
return mtd_device_register(mtd, parts, no_autopart ? 0 : numparts);
}
static inline int __init doc2000_init(struct mtd_info *mtd)
{
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
doc->late_init = nftl_scan_bbt;
doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
doc2000_count_chips(mtd);
mtd->name = "DiskOnChip 2000 (NFTL Model)";
return (4 * doc->chips_per_floor);
}
static inline int __init doc2001_init(struct mtd_info *mtd)
{
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
ReadDOC(doc->virtadr, ChipID);
ReadDOC(doc->virtadr, ChipID);
ReadDOC(doc->virtadr, ChipID);
if (ReadDOC(doc->virtadr, ChipID) != DOC_ChipID_DocMil) {
/* It's not a Millennium; it's one of the newer
DiskOnChip 2000 units with a similar ASIC.
Treat it like a Millennium, except that it
can have multiple chips. */
doc2000_count_chips(mtd);
mtd->name = "DiskOnChip 2000 (INFTL Model)";
doc->late_init = inftl_scan_bbt;
return (4 * doc->chips_per_floor);
} else {
/* Bog-standard Millennium */
doc->chips_per_floor = 1;
mtd->name = "DiskOnChip Millennium";
doc->late_init = nftl_scan_bbt;
return 1;
}
}
static inline int __init doc2001plus_init(struct mtd_info *mtd)
{
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
doc->late_init = inftl_scan_bbt;
this->ecc.hwctl = doc2001plus_enable_hwecc;
doc->chips_per_floor = 1;
mtd->name = "DiskOnChip Millennium Plus";
return 1;
}
static int doc200x_attach_chip(struct nand_chip *chip)
{
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return 0;
chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
chip->ecc.size = 512;
chip->ecc.bytes = 6;
chip->ecc.strength = 2;
chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
chip->ecc.hwctl = doc200x_enable_hwecc;
chip->ecc.calculate = doc200x_calculate_ecc;
chip->ecc.correct = doc200x_correct_data;
return 0;
}
static const struct nand_controller_ops doc200x_ops = {
.exec_op = doc200x_exec_op,
.attach_chip = doc200x_attach_chip,
};
static const struct nand_controller_ops doc2001plus_ops = {
.exec_op = doc2001plus_exec_op,
.attach_chip = doc200x_attach_chip,
};
static int __init doc_probe(unsigned long physadr)
{
struct nand_chip *nand = NULL;
struct doc_priv *doc = NULL;
unsigned char ChipID;
struct mtd_info *mtd;
void __iomem *virtadr;
unsigned char save_control;
unsigned char tmp, tmpb, tmpc;
int reg, len, numchips;
int ret = 0;
if (!request_mem_region(physadr, DOC_IOREMAP_LEN, "DiskOnChip"))
return -EBUSY;
virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
if (!virtadr) {
pr_err("Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n",
DOC_IOREMAP_LEN, physadr);
ret = -EIO;
goto error_ioremap;
}
/* It's not possible to cleanly detect the DiskOnChip - the
* bootup procedure will put the device into reset mode, and
* it's not possible to talk to it without actually writing
* to the DOCControl register. So we store the current contents
* of the DOCControl register's location, in case we later decide
* that it's not a DiskOnChip, and want to put it back how we
* found it.
*/
save_control = ReadDOC(virtadr, DOCControl);
/* Reset the DiskOnChip ASIC */
WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
/* Enable the DiskOnChip ASIC */
WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
ChipID = ReadDOC(virtadr, ChipID);
switch (ChipID) {
case DOC_ChipID_Doc2k:
reg = DoC_2k_ECCStatus;
break;
case DOC_ChipID_DocMil:
reg = DoC_ECCConf;
break;
case DOC_ChipID_DocMilPlus16:
case DOC_ChipID_DocMilPlus32:
case 0:
/* Possible Millennium Plus, need to do more checks */
/* Possibly release from power down mode */
for (tmp = 0; (tmp < 4); tmp++)
ReadDOC(virtadr, Mplus_Power);
/* Reset the Millennium Plus ASIC */
tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
WriteDOC(tmp, virtadr, Mplus_DOCControl);
WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
usleep_range(1000, 2000);
/* Enable the Millennium Plus ASIC */
tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
WriteDOC(tmp, virtadr, Mplus_DOCControl);
WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
usleep_range(1000, 2000);
ChipID = ReadDOC(virtadr, ChipID);
switch (ChipID) {
case DOC_ChipID_DocMilPlus16:
reg = DoC_Mplus_Toggle;
break;
case DOC_ChipID_DocMilPlus32:
pr_err("DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
fallthrough;
default:
ret = -ENODEV;
goto notfound;
}
break;
default:
ret = -ENODEV;
goto notfound;
}
/* Check the TOGGLE bit in the ECC register */
tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
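	/*
	 * The TOGGLE bit is expected to flip on every read: consecutive reads
	 * must differ and every other read must match, otherwise there is no
	 * DiskOnChip behind this address.
	 */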
if ((tmp == tmpb) || (tmp != tmpc)) {
pr_warn("Possible DiskOnChip at 0x%lx failed TOGGLE test, dropping.\n", physadr);
ret = -ENODEV;
goto notfound;
}
for (mtd = doclist; mtd; mtd = doc->nextdoc) {
unsigned char oldval;
unsigned char newval;
nand = mtd_to_nand(mtd);
doc = nand_get_controller_data(nand);
/* Use the alias resolution register to determine if this is
in fact the same DOC aliased to a new address. If writes
to one chip's alias resolution register change the value on
the other chip, they're the same chip. */
if (ChipID == DOC_ChipID_DocMilPlus16) {
oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
newval = ReadDOC(virtadr, Mplus_AliasResolution);
} else {
oldval = ReadDOC(doc->virtadr, AliasResolution);
newval = ReadDOC(virtadr, AliasResolution);
}
if (oldval != newval)
continue;
if (ChipID == DOC_ChipID_DocMilPlus16) {
WriteDOC(~newval, virtadr, Mplus_AliasResolution);
oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
WriteDOC(newval, virtadr, Mplus_AliasResolution); // restore it
} else {
WriteDOC(~newval, virtadr, AliasResolution);
oldval = ReadDOC(doc->virtadr, AliasResolution);
WriteDOC(newval, virtadr, AliasResolution); // restore it
}
newval = ~newval;
if (oldval == newval) {
pr_debug("Found alias of DOC at 0x%lx to 0x%lx\n",
doc->physadr, physadr);
goto notfound;
}
}
pr_notice("DiskOnChip found at 0x%lx\n", physadr);
len = sizeof(struct nand_chip) + sizeof(struct doc_priv) +
(2 * sizeof(struct nand_bbt_descr));
nand = kzalloc(len, GFP_KERNEL);
if (!nand) {
ret = -ENOMEM;
goto fail;
}
/*
* Allocate a RS codec instance
*
	 * Symbol size is 10 (bits)
	 * Primitive polynomial is x^10+x^3+1
	 * First consecutive root is 510
	 * Primitive element to generate roots = 1
	 * Generator polynomial degree = 4
*/
doc = (struct doc_priv *) (nand + 1);
doc->rs_decoder = init_rs(10, 0x409, FCR, 1, NROOTS);
if (!doc->rs_decoder) {
pr_err("DiskOnChip: Could not create a RS codec\n");
ret = -ENOMEM;
goto fail;
}
nand_controller_init(&doc->base);
if (ChipID == DOC_ChipID_DocMilPlus16)
doc->base.ops = &doc2001plus_ops;
else
doc->base.ops = &doc200x_ops;
mtd = nand_to_mtd(nand);
nand->bbt_td = (struct nand_bbt_descr *) (doc + 1);
nand->bbt_md = nand->bbt_td + 1;
mtd->owner = THIS_MODULE;
mtd_set_ooblayout(mtd, &doc200x_ooblayout_ops);
nand->controller = &doc->base;
nand_set_controller_data(nand, doc);
nand->bbt_options = NAND_BBT_USE_FLASH;
/* Skip the automatic BBT scan so we can run it manually */
nand->options |= NAND_SKIP_BBTSCAN | NAND_NO_BBM_QUIRK;
doc->physadr = physadr;
doc->virtadr = virtadr;
doc->ChipID = ChipID;
doc->curfloor = -1;
doc->curchip = -1;
doc->mh0_page = -1;
doc->mh1_page = -1;
doc->nextdoc = doclist;
if (ChipID == DOC_ChipID_Doc2k)
numchips = doc2000_init(mtd);
else if (ChipID == DOC_ChipID_DocMilPlus16)
numchips = doc2001plus_init(mtd);
else
numchips = doc2001_init(mtd);
if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) {
/* DBB note: i believe nand_cleanup is necessary here, as
buffers may have been allocated in nand_base. Check with
Thomas. FIX ME! */
nand_cleanup(nand);
goto fail;
}
/* Success! */
doclist = mtd;
return 0;
notfound:
/* Put back the contents of the DOCControl register, in case it's not
actually a DiskOnChip. */
WriteDOC(save_control, virtadr, DOCControl);
fail:
if (doc)
free_rs(doc->rs_decoder);
kfree(nand);
iounmap(virtadr);
error_ioremap:
release_mem_region(physadr, DOC_IOREMAP_LEN);
return ret;
}
static void release_nanddoc(void)
{
struct mtd_info *mtd, *nextmtd;
struct nand_chip *nand;
struct doc_priv *doc;
int ret;
for (mtd = doclist; mtd; mtd = nextmtd) {
nand = mtd_to_nand(mtd);
doc = nand_get_controller_data(nand);
nextmtd = doc->nextdoc;
ret = mtd_device_unregister(mtd);
WARN_ON(ret);
nand_cleanup(nand);
iounmap(doc->virtadr);
release_mem_region(doc->physadr, DOC_IOREMAP_LEN);
free_rs(doc->rs_decoder);
kfree(nand);
}
}
static int __init init_nanddoc(void)
{
int i, ret = 0;
if (doc_config_location) {
pr_info("Using configured DiskOnChip probe address 0x%lx\n",
doc_config_location);
ret = doc_probe(doc_config_location);
if (ret < 0)
return ret;
} else {
for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
doc_probe(doc_locations[i]);
}
}
/* No banner message any more. Print a message if no DiskOnChip
found, so the user knows we at least tried. */
if (!doclist) {
pr_info("No valid DiskOnChip devices found\n");
ret = -ENODEV;
}
return ret;
}
static void __exit cleanup_nanddoc(void)
{
/* Cleanup the nand/DoC resources */
release_nanddoc();
}
module_init(init_nanddoc);
module_exit(cleanup_nanddoc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <[email protected]>");
MODULE_DESCRIPTION("M-Systems DiskOnChip 2000, Millennium and Millennium Plus device driver");
| linux-master | drivers/mtd/nand/raw/diskonchip.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2007 PA Semi, Inc
*
* Author: Egor Martovetsky <[email protected]>
* Maintained by: Olof Johansson <[email protected]>
*
* Driver for the PWRficient onchip NAND flash interface
*/
#undef DEBUG
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <asm/io.h>
#define LBICTRL_LPCCTL_NR 0x00004000
#define CLE_PIN_CTL 15
#define ALE_PIN_CTL 14
struct pasemi_ddata {
struct nand_chip chip;
unsigned int lpcctl;
struct nand_controller controller;
};
static const char driver_name[] = "pasemi-nand";
static void pasemi_read_buf(struct nand_chip *chip, u_char *buf, int len)
{
while (len > 0x800) {
memcpy_fromio(buf, chip->legacy.IO_ADDR_R, 0x800);
buf += 0x800;
len -= 0x800;
}
memcpy_fromio(buf, chip->legacy.IO_ADDR_R, len);
}
static void pasemi_write_buf(struct nand_chip *chip, const u_char *buf,
int len)
{
while (len > 0x800) {
memcpy_toio(chip->legacy.IO_ADDR_R, buf, 0x800);
buf += 0x800;
len -= 0x800;
}
memcpy_toio(chip->legacy.IO_ADDR_R, buf, len);
}
static void pasemi_hwcontrol(struct nand_chip *chip, int cmd,
unsigned int ctrl)
{
struct pasemi_ddata *ddata = container_of(chip, struct pasemi_ddata, chip);
if (cmd == NAND_CMD_NONE)
return;
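	/*
	 * CLE and ALE appear to be wired to localbus address lines 15 and 14,
	 * so a command vs. address cycle is selected purely by the offset the
	 * byte is written to.
	 */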
if (ctrl & NAND_CLE)
out_8(chip->legacy.IO_ADDR_W + (1 << CLE_PIN_CTL), cmd);
else
out_8(chip->legacy.IO_ADDR_W + (1 << ALE_PIN_CTL), cmd);
/* Push out posted writes */
eieio();
inl(ddata->lpcctl);
}
static int pasemi_device_ready(struct nand_chip *chip)
{
struct pasemi_ddata *ddata = container_of(chip, struct pasemi_ddata, chip);
return !!(inl(ddata->lpcctl) & LBICTRL_LPCCTL_NR);
}
static int pasemi_attach_chip(struct nand_chip *chip)
{
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
static const struct nand_controller_ops pasemi_ops = {
.attach_chip = pasemi_attach_chip,
};
static int pasemi_nand_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct pci_dev *pdev;
struct device_node *np = dev->of_node;
struct resource res;
struct nand_chip *chip;
struct nand_controller *controller;
int err = 0;
struct pasemi_ddata *ddata;
struct mtd_info *pasemi_nand_mtd;
err = of_address_to_resource(np, 0, &res);
if (err)
return -EINVAL;
dev_dbg(dev, "pasemi_nand at %pR\n", &res);
/* Allocate memory for MTD device structure and private data */
ddata = kzalloc(sizeof(*ddata), GFP_KERNEL);
if (!ddata) {
err = -ENOMEM;
goto out;
}
platform_set_drvdata(ofdev, ddata);
chip = &ddata->chip;
controller = &ddata->controller;
controller->ops = &pasemi_ops;
nand_controller_init(controller);
chip->controller = controller;
pasemi_nand_mtd = nand_to_mtd(chip);
/* Link the private data with the MTD structure */
pasemi_nand_mtd->dev.parent = dev;
chip->legacy.IO_ADDR_R = of_iomap(np, 0);
chip->legacy.IO_ADDR_W = chip->legacy.IO_ADDR_R;
if (!chip->legacy.IO_ADDR_R) {
err = -EIO;
goto out_mtd;
}
pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa008, NULL);
if (!pdev) {
err = -ENODEV;
goto out_ior;
}
ddata->lpcctl = pci_resource_start(pdev, 0);
pci_dev_put(pdev);
if (!request_region(ddata->lpcctl, 4, driver_name)) {
err = -EBUSY;
goto out_ior;
}
chip->legacy.cmd_ctrl = pasemi_hwcontrol;
chip->legacy.dev_ready = pasemi_device_ready;
chip->legacy.read_buf = pasemi_read_buf;
chip->legacy.write_buf = pasemi_write_buf;
chip->legacy.chip_delay = 0;
/* Enable the following for a flash based bad block table */
chip->bbt_options = NAND_BBT_USE_FLASH;
/*
* This driver assumes that the default ECC engine should be TYPE_SOFT.
* Set ->engine_type before registering the NAND devices in order to
* provide a driver specific default value.
*/
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
/* Scan to find existence of the device */
err = nand_scan(chip, 1);
if (err)
goto out_lpc;
if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
dev_err(dev, "Unable to register MTD device\n");
err = -ENODEV;
goto out_cleanup_nand;
}
dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res,
ddata->lpcctl);
return 0;
out_cleanup_nand:
nand_cleanup(chip);
out_lpc:
release_region(ddata->lpcctl, 4);
out_ior:
iounmap(chip->legacy.IO_ADDR_R);
out_mtd:
kfree(ddata);
out:
return err;
}
static void pasemi_nand_remove(struct platform_device *ofdev)
{
struct pasemi_ddata *ddata = platform_get_drvdata(ofdev);
struct mtd_info *pasemi_nand_mtd;
int ret;
struct nand_chip *chip;
chip = &ddata->chip;
pasemi_nand_mtd = nand_to_mtd(chip);
/* Release resources, unregister device */
ret = mtd_device_unregister(pasemi_nand_mtd);
WARN_ON(ret);
nand_cleanup(chip);
release_region(ddata->lpcctl, 4);
iounmap(chip->legacy.IO_ADDR_R);
/* Free the MTD device structure */
kfree(ddata);
}
static const struct of_device_id pasemi_nand_match[] =
{
{
.compatible = "pasemi,localbus-nand",
},
{},
};
MODULE_DEVICE_TABLE(of, pasemi_nand_match);
static struct platform_driver pasemi_nand_driver =
{
.driver = {
.name = driver_name,
.of_match_table = pasemi_nand_match,
},
.probe = pasemi_nand_probe,
.remove_new = pasemi_nand_remove,
};
module_platform_driver(pasemi_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Egor Martovetsky <[email protected]>");
MODULE_DESCRIPTION("NAND flash interface driver for PA Semi PWRficient");
| linux-master | drivers/mtd/nand/raw/pasemi_nand.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* BCM47XX NAND flash driver
*
* Copyright (C) 2012 Rafał Miłecki <[email protected]>
*/
#include "bcm47xxnflash.h"
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/bcma/bcma.h>
/* Broadcom uses 1'000'000 but that seems to be too many. Tests on a
 * WNDR4500 have shown ~1000 retries as the maximum. */
#define NFLASH_READY_RETRIES 10000
#define NFLASH_SECTOR_SIZE 512
#define NCTL_CMD0 0x00010000
#define NCTL_COL 0x00020000 /* Update column with value from BCMA_CC_NFLASH_COL_ADDR */
#define NCTL_ROW 0x00040000 /* Update row (page) with value from BCMA_CC_NFLASH_ROW_ADDR */
#define NCTL_CMD1W 0x00080000
#define NCTL_READ 0x00100000
#define NCTL_WRITE 0x00200000
#define NCTL_SPECADDR 0x01000000
#define NCTL_READY 0x04000000
#define NCTL_ERR 0x08000000
#define NCTL_CSA 0x40000000
#define NCTL_START 0x80000000
/**************************************************
* Various helpers
**************************************************/
static inline u8 bcm47xxnflash_ops_bcm4706_ns_to_cycle(u16 ns, u16 clock)
{
return ((ns * 1000 * clock) / 1000000) + 1;
}
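/*
 * Worked example of the conversion above (illustrative clock value, not
 * taken from a datasheet): with a 200 MHz backplane clock,
 *   ns_to_cycle(15, 200) = ((15 * 1000 * 200) / 1000000) + 1 = 3 + 1 = 4 cycles.
 * The "+ 1" guards against integer truncation so the programmed wait is
 * never shorter than the requested time.
 */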
static int bcm47xxnflash_ops_bcm4706_ctl_cmd(struct bcma_drv_cc *cc, u32 code)
{
int i = 0;
bcma_cc_write32(cc, BCMA_CC_NFLASH_CTL, NCTL_START | code);
for (i = 0; i < NFLASH_READY_RETRIES; i++) {
if (!(bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_START)) {
i = 0;
break;
}
}
if (i) {
pr_err("NFLASH control command not ready!\n");
return -EBUSY;
}
return 0;
}
static int bcm47xxnflash_ops_bcm4706_poll(struct bcma_drv_cc *cc)
{
int i;
for (i = 0; i < NFLASH_READY_RETRIES; i++) {
if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_READY) {
if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) &
BCMA_CC_NFLASH_CTL_ERR) {
pr_err("Error on polling\n");
return -EBUSY;
} else {
return 0;
}
}
}
pr_err("Polling timeout!\n");
return -EBUSY;
}
/**************************************************
* R/W
**************************************************/
static void bcm47xxnflash_ops_bcm4706_read(struct mtd_info *mtd, uint8_t *buf,
int len)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
u32 ctlcode;
u32 *dest = (u32 *)buf;
int i;
int toread;
BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
/* Don't validate the column using nand_chip->page_shift; it may be
 * bigger when accessing the OOB area */
while (len) {
/* We can read maximum of 0x200 bytes at once */
toread = min(len, 0x200);
/* Set page and column */
bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_COL_ADDR,
b47n->curr_column);
bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_ROW_ADDR,
b47n->curr_page_addr);
/* Prepare to read */
ctlcode = NCTL_CSA | NCTL_CMD1W | NCTL_ROW | NCTL_COL |
NCTL_CMD0;
ctlcode |= NAND_CMD_READSTART << 8;
if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode))
return;
if (bcm47xxnflash_ops_bcm4706_poll(b47n->cc))
return;
/* Eventually read some data :) */
for (i = 0; i < toread; i += 4, dest++) {
ctlcode = NCTL_CSA | 0x30000000 | NCTL_READ;
if (i == toread - 4) /* Last read goes without NCTL_CSA */
ctlcode &= ~NCTL_CSA;
if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
ctlcode))
return;
*dest = bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA);
}
b47n->curr_column += toread;
len -= toread;
}
}
static void bcm47xxnflash_ops_bcm4706_write(struct mtd_info *mtd,
const uint8_t *buf, int len)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
struct bcma_drv_cc *cc = b47n->cc;
u32 ctlcode;
const u32 *data = (u32 *)buf;
int i;
BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
/* Don't validate the column using nand_chip->page_shift; it may be
 * bigger when accessing the OOB area */
for (i = 0; i < len; i += 4, data++) {
bcma_cc_write32(cc, BCMA_CC_NFLASH_DATA, *data);
ctlcode = NCTL_CSA | 0x30000000 | NCTL_WRITE;
if (i == len - 4) /* Last write goes without NCTL_CSA */
ctlcode &= ~NCTL_CSA;
if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode)) {
pr_err("%s ctl_cmd didn't work!\n", __func__);
return;
}
}
b47n->curr_column += len;
}
/**************************************************
* NAND chip ops
**************************************************/
static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct nand_chip *nand_chip,
int cmd, unsigned int ctrl)
{
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
u32 code = 0;
if (cmd == NAND_CMD_NONE)
return;
if (cmd & NAND_CTRL_CLE)
code = cmd | NCTL_CMD0;
/* nCS is not needed for reset command */
if (cmd != NAND_CMD_RESET)
code |= NCTL_CSA;
bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code);
}
/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */
static void bcm47xxnflash_ops_bcm4706_select_chip(struct nand_chip *chip,
int cs)
{
return;
}
static int bcm47xxnflash_ops_bcm4706_dev_ready(struct nand_chip *nand_chip)
{
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
return !!(bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_CTL) & NCTL_READY);
}
/*
 * The default nand_command and nand_command_lp don't match the BCM4706
 * hardware layout. For example, reading the chip ID is performed in a
 * non-standard way. Setting the column and page is also handled
 * differently; we use special registers of the ChipCommon core. Hacking
 * cmd_ctrl to understand and convert standard commands would be much more
 * complicated.
 */
static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct nand_chip *nand_chip,
unsigned command, int column,
int page_addr)
{
struct mtd_info *mtd = nand_to_mtd(nand_chip);
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
struct bcma_drv_cc *cc = b47n->cc;
u32 ctlcode;
int i;
if (column != -1)
b47n->curr_column = column;
if (page_addr != -1)
b47n->curr_page_addr = page_addr;
switch (command) {
case NAND_CMD_RESET:
nand_chip->legacy.cmd_ctrl(nand_chip, command, NAND_CTRL_CLE);
ndelay(100);
nand_wait_ready(nand_chip);
break;
case NAND_CMD_READID:
ctlcode = NCTL_CSA | 0x01000000 | NCTL_CMD1W | NCTL_CMD0;
ctlcode |= NAND_CMD_READID;
if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode)) {
pr_err("READID error\n");
break;
}
/*
 * Reading is special: the last read has to go without the NCTL_CSA
 * bit. We don't know how many reads the NAND subsystem is going
 * to perform, so cache everything.
 */
for (i = 0; i < ARRAY_SIZE(b47n->id_data); i++) {
ctlcode = NCTL_CSA | NCTL_READ;
if (i == ARRAY_SIZE(b47n->id_data) - 1)
ctlcode &= ~NCTL_CSA;
if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
ctlcode)) {
pr_err("READID error\n");
break;
}
b47n->id_data[i] =
bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA)
& 0xFF;
}
break;
case NAND_CMD_STATUS:
ctlcode = NCTL_CSA | NCTL_CMD0 | NAND_CMD_STATUS;
if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
pr_err("STATUS command error\n");
break;
case NAND_CMD_READ0:
break;
case NAND_CMD_READOOB:
if (page_addr != -1)
b47n->curr_column += mtd->writesize;
break;
case NAND_CMD_ERASE1:
bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
b47n->curr_page_addr);
ctlcode = NCTL_ROW | NCTL_CMD1W | NCTL_CMD0 |
NAND_CMD_ERASE1 | (NAND_CMD_ERASE2 << 8);
if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
pr_err("ERASE1 failed\n");
break;
case NAND_CMD_ERASE2:
break;
case NAND_CMD_SEQIN:
/* Set page and column */
bcma_cc_write32(cc, BCMA_CC_NFLASH_COL_ADDR,
b47n->curr_column);
bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
b47n->curr_page_addr);
/* Prepare to write */
ctlcode = 0x40000000 | NCTL_ROW | NCTL_COL | NCTL_CMD0;
ctlcode |= NAND_CMD_SEQIN;
if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
pr_err("SEQIN failed\n");
break;
case NAND_CMD_PAGEPROG:
if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_CMD0 |
NAND_CMD_PAGEPROG))
pr_err("PAGEPROG failed\n");
if (bcm47xxnflash_ops_bcm4706_poll(cc))
pr_err("PAGEPROG not ready\n");
break;
default:
pr_err("Command 0x%X unsupported\n", command);
break;
}
b47n->curr_command = command;
}
static u8 bcm47xxnflash_ops_bcm4706_read_byte(struct nand_chip *nand_chip)
{
struct mtd_info *mtd = nand_to_mtd(nand_chip);
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
struct bcma_drv_cc *cc = b47n->cc;
u32 tmp = 0;
switch (b47n->curr_command) {
case NAND_CMD_READID:
if (b47n->curr_column >= ARRAY_SIZE(b47n->id_data)) {
pr_err("Requested invalid id_data: %d\n",
b47n->curr_column);
return 0;
}
return b47n->id_data[b47n->curr_column++];
case NAND_CMD_STATUS:
if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_READ))
return 0;
return bcma_cc_read32(cc, BCMA_CC_NFLASH_DATA) & 0xff;
case NAND_CMD_READOOB:
bcm47xxnflash_ops_bcm4706_read(mtd, (u8 *)&tmp, 4);
return tmp & 0xFF;
}
pr_err("Invalid command for byte read: 0x%X\n", b47n->curr_command);
return 0;
}
static void bcm47xxnflash_ops_bcm4706_read_buf(struct nand_chip *nand_chip,
uint8_t *buf, int len)
{
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
switch (b47n->curr_command) {
case NAND_CMD_READ0:
case NAND_CMD_READOOB:
bcm47xxnflash_ops_bcm4706_read(nand_to_mtd(nand_chip), buf,
len);
return;
}
pr_err("Invalid command for buf read: 0x%X\n", b47n->curr_command);
}
static void bcm47xxnflash_ops_bcm4706_write_buf(struct nand_chip *nand_chip,
const uint8_t *buf, int len)
{
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
switch (b47n->curr_command) {
case NAND_CMD_SEQIN:
bcm47xxnflash_ops_bcm4706_write(nand_to_mtd(nand_chip), buf,
len);
return;
}
pr_err("Invalid command for buf write: 0x%X\n", b47n->curr_command);
}
/**************************************************
* Init
**************************************************/
int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
{
struct nand_chip *nand_chip = &b47n->nand_chip;
int err;
u32 freq;
u16 clock;
u8 w0, w1, w2, w3, w4;
unsigned long chipsize; /* MiB */
u8 tbits, col_bits, col_size, row_bits, row_bsize;
u32 val;
nand_chip->legacy.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
nand_chip->legacy.cmd_ctrl = bcm47xxnflash_ops_bcm4706_cmd_ctrl;
nand_chip->legacy.dev_ready = bcm47xxnflash_ops_bcm4706_dev_ready;
b47n->nand_chip.legacy.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
b47n->nand_chip.legacy.read_byte = bcm47xxnflash_ops_bcm4706_read_byte;
b47n->nand_chip.legacy.read_buf = bcm47xxnflash_ops_bcm4706_read_buf;
b47n->nand_chip.legacy.write_buf = bcm47xxnflash_ops_bcm4706_write_buf;
b47n->nand_chip.legacy.set_features = nand_get_set_features_notsupp;
b47n->nand_chip.legacy.get_features = nand_get_set_features_notsupp;
nand_chip->legacy.chip_delay = 50;
b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
/* TODO: implement ECC */
b47n->nand_chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_NONE;
/* Enable NAND flash access */
bcma_cc_set32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
BCMA_CC_4706_FLASHSCFG_NF1);
/* Configure wait counters */
if (b47n->cc->status & BCMA_CC_CHIPST_4706_PKG_OPTION) {
/* 400 MHz */
freq = 400000000 / 4;
} else {
freq = bcma_chipco_pll_read(b47n->cc, 4);
freq = (freq & 0xFFF) >> 3;
/* Fixed reference clock 25 MHz and m = 2 */
freq = (freq * 25000000 / 2) / 4;
}
clock = freq / 1000000;
w0 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(15, clock);
w1 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(20, clock);
w2 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
w3 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
w4 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(100, clock);
bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_WAITCNT0,
(w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0));
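	/*
	 * Worked example (assumed 100 MHz clock, not a measured value): the
	 * conversions above give w0 = 2, w1 = 3, w2 = w3 = 2 and w4 = 11
	 * cycles, so WAITCNT0 is programmed with
	 * (11 << 24) | (2 << 18) | (2 << 12) | (3 << 6) | 2.
	 */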
/* Scan NAND */
err = nand_scan(&b47n->nand_chip, 1);
if (err) {
pr_err("Could not scan NAND flash: %d\n", err);
goto exit;
}
/* Configure FLASH */
chipsize = nanddev_target_size(&b47n->nand_chip.base) >> 20;
tbits = ffs(chipsize); /* find first bit set */
if (!tbits || tbits != fls(chipsize)) {
pr_err("Invalid flash size: 0x%lX\n", chipsize);
err = -ENOTSUPP;
goto exit;
}
tbits += 19; /* Broadcom increases *index* by 20, we increase *pos* */
col_bits = b47n->nand_chip.page_shift + 1;
col_size = (col_bits + 7) / 8;
row_bits = tbits - col_bits + 1;
row_bsize = (row_bits + 7) / 8;
val = ((row_bsize - 1) << 6) | ((col_size - 1) << 4) | 2;
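	/*
	 * Worked example (assumed geometry, for illustration only): a 128 MiB
	 * chip with 2048-byte pages gives tbits = ffs(128) + 19 = 27 and
	 * col_bits = 11 + 1 = 12 (col_size = 2 bytes), so
	 * row_bits = 27 - 12 + 1 = 16 (row_bsize = 2 bytes) and
	 * val = (1 << 6) | (1 << 4) | 2 = 0x52.
	 */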
bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_CONF, val);
exit:
if (err)
bcma_cc_mask32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
~BCMA_CC_4706_FLASHSCFG_NF1);
return err;
}
| linux-master | drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* BCM47XX NAND flash driver
*
* Copyright (C) 2012 Rafał Miłecki <[email protected]>
*/
#include "bcm47xxnflash.h"
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rafał Miłecki");
static const char *probes[] = { "bcm47xxpart", NULL };
static int bcm47xxnflash_probe(struct platform_device *pdev)
{
struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
struct bcm47xxnflash *b47n;
struct mtd_info *mtd;
int err = 0;
b47n = devm_kzalloc(&pdev->dev, sizeof(*b47n), GFP_KERNEL);
if (!b47n)
return -ENOMEM;
nand_set_controller_data(&b47n->nand_chip, b47n);
mtd = nand_to_mtd(&b47n->nand_chip);
mtd->dev.parent = &pdev->dev;
b47n->cc = container_of(nflash, struct bcma_drv_cc, nflash);
if (b47n->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
err = bcm47xxnflash_ops_bcm4706_init(b47n);
} else {
pr_err("Device not supported\n");
err = -ENOTSUPP;
}
if (err) {
pr_err("Initialization failed: %d\n", err);
return err;
}
platform_set_drvdata(pdev, b47n);
err = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
if (err) {
pr_err("Failed to register MTD device: %d\n", err);
return err;
}
return 0;
}
static void bcm47xxnflash_remove(struct platform_device *pdev)
{
struct bcm47xxnflash *nflash = platform_get_drvdata(pdev);
struct nand_chip *chip = &nflash->nand_chip;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
}
static struct platform_driver bcm47xxnflash_driver = {
.probe = bcm47xxnflash_probe,
.remove_new = bcm47xxnflash_remove,
.driver = {
.name = "bcma_nflash",
},
};
module_platform_driver(bcm47xxnflash_driver);
| linux-master | drivers/mtd/nand/raw/bcm47xxnflash/main.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2015 Broadcom Corporation
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "brcmnand.h"
struct iproc_nand_soc {
struct brcmnand_soc soc;
void __iomem *idm_base;
void __iomem *ext_base;
spinlock_t idm_lock;
};
#define IPROC_NAND_CTLR_READY_OFFSET 0x10
#define IPROC_NAND_CTLR_READY BIT(0)
#define IPROC_NAND_IO_CTRL_OFFSET 0x00
#define IPROC_NAND_APB_LE_MODE BIT(24)
#define IPROC_NAND_INT_CTRL_READ_ENABLE BIT(6)
static bool iproc_nand_intc_ack(struct brcmnand_soc *soc)
{
struct iproc_nand_soc *priv =
container_of(soc, struct iproc_nand_soc, soc);
void __iomem *mmio = priv->ext_base + IPROC_NAND_CTLR_READY_OFFSET;
u32 val = brcmnand_readl(mmio);
if (val & IPROC_NAND_CTLR_READY) {
brcmnand_writel(IPROC_NAND_CTLR_READY, mmio);
return true;
}
return false;
}
static void iproc_nand_intc_set(struct brcmnand_soc *soc, bool en)
{
struct iproc_nand_soc *priv =
container_of(soc, struct iproc_nand_soc, soc);
void __iomem *mmio = priv->idm_base + IPROC_NAND_IO_CTRL_OFFSET;
u32 val;
unsigned long flags;
spin_lock_irqsave(&priv->idm_lock, flags);
val = brcmnand_readl(mmio);
if (en)
val |= IPROC_NAND_INT_CTRL_READ_ENABLE;
else
val &= ~IPROC_NAND_INT_CTRL_READ_ENABLE;
brcmnand_writel(val, mmio);
spin_unlock_irqrestore(&priv->idm_lock, flags);
}
static void iproc_nand_apb_access(struct brcmnand_soc *soc, bool prepare,
bool is_param)
{
struct iproc_nand_soc *priv =
container_of(soc, struct iproc_nand_soc, soc);
void __iomem *mmio = priv->idm_base + IPROC_NAND_IO_CTRL_OFFSET;
u32 val;
unsigned long flags;
spin_lock_irqsave(&priv->idm_lock, flags);
val = brcmnand_readl(mmio);
/*
 * In the case of BE, or when dealing with NAND data, always configure
 * the APB bus to LE mode before accessing the FIFO and switch back to
 * BE mode after the access is done
 */
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) || !is_param) {
if (prepare)
val |= IPROC_NAND_APB_LE_MODE;
else
val &= ~IPROC_NAND_APB_LE_MODE;
} else { /* when in LE accessing the parameter page, keep APB in BE */
val &= ~IPROC_NAND_APB_LE_MODE;
}
brcmnand_writel(val, mmio);
spin_unlock_irqrestore(&priv->idm_lock, flags);
}
static int iproc_nand_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct iproc_nand_soc *priv;
struct brcmnand_soc *soc;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
soc = &priv->soc;
spin_lock_init(&priv->idm_lock);
priv->idm_base = devm_platform_ioremap_resource_byname(pdev, "iproc-idm");
if (IS_ERR(priv->idm_base))
return PTR_ERR(priv->idm_base);
priv->ext_base = devm_platform_ioremap_resource_byname(pdev, "iproc-ext");
if (IS_ERR(priv->ext_base))
return PTR_ERR(priv->ext_base);
soc->ctlrdy_ack = iproc_nand_intc_ack;
soc->ctlrdy_set_enabled = iproc_nand_intc_set;
soc->prepare_data_bus = iproc_nand_apb_access;
return brcmnand_probe(pdev, soc);
}
static const struct of_device_id iproc_nand_of_match[] = {
{ .compatible = "brcm,nand-iproc" },
{},
};
MODULE_DEVICE_TABLE(of, iproc_nand_of_match);
static struct platform_driver iproc_nand_driver = {
.probe = iproc_nand_probe,
.remove = brcmnand_remove,
.driver = {
.name = "iproc_nand",
.pm = &brcmnand_pm_ops,
.of_match_table = iproc_nand_of_match,
}
};
module_platform_driver(iproc_nand_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Brian Norris");
MODULE_AUTHOR("Ray Jui");
MODULE_DESCRIPTION("NAND driver for Broadcom IPROC-based SoCs");
| linux-master | drivers/mtd/nand/raw/brcmnand/iproc_nand.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2010-2015 Broadcom Corporation
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/platform_data/brcmnand.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/static_key.h>
#include <linux/list.h>
#include <linux/log2.h>
#include "brcmnand.h"
/*
 * This flag controls whether WP stays on between erase/write commands to mitigate
* flash corruption due to power glitches. Values:
* 0: NAND_WP is not used or not available
* 1: NAND_WP is set by default, cleared for erase/write operations
* 2: NAND_WP is always cleared
*/
static int wp_on = 1;
module_param(wp_on, int, 0444);
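/*
 * Usage sketch (assuming the module is built as "brcmnand"): the default can
 * be overridden at load time, e.g. "modprobe brcmnand wp_on=2" to keep #WP
 * permanently deasserted, or "brcmnand.wp_on=0" on the kernel command line
 * when the driver is built in.
 */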
/***********************************************************************
* Definitions
***********************************************************************/
#define DRV_NAME "brcmnand"
#define CMD_NULL 0x00
#define CMD_PAGE_READ 0x01
#define CMD_SPARE_AREA_READ 0x02
#define CMD_STATUS_READ 0x03
#define CMD_PROGRAM_PAGE 0x04
#define CMD_PROGRAM_SPARE_AREA 0x05
#define CMD_COPY_BACK 0x06
#define CMD_DEVICE_ID_READ 0x07
#define CMD_BLOCK_ERASE 0x08
#define CMD_FLASH_RESET 0x09
#define CMD_BLOCKS_LOCK 0x0a
#define CMD_BLOCKS_LOCK_DOWN 0x0b
#define CMD_BLOCKS_UNLOCK 0x0c
#define CMD_READ_BLOCKS_LOCK_STATUS 0x0d
#define CMD_PARAMETER_READ 0x0e
#define CMD_PARAMETER_CHANGE_COL 0x0f
#define CMD_LOW_LEVEL_OP 0x10
struct brcm_nand_dma_desc {
u32 next_desc;
u32 next_desc_ext;
u32 cmd_irq;
u32 dram_addr;
u32 dram_addr_ext;
u32 tfr_len;
u32 total_len;
u32 flash_addr;
u32 flash_addr_ext;
u32 cs;
u32 pad2[5];
u32 status_valid;
} __packed;
/* Bitfields for brcm_nand_dma_desc::status_valid */
#define FLASH_DMA_ECC_ERROR (1 << 8)
#define FLASH_DMA_CORR_ERROR (1 << 9)
/* Bitfields for DMA_MODE */
#define FLASH_DMA_MODE_STOP_ON_ERROR BIT(1) /* stop on uncorrectable ECC error */
#define FLASH_DMA_MODE_MODE BIT(0) /* link list */
#define FLASH_DMA_MODE_MASK (FLASH_DMA_MODE_STOP_ON_ERROR | \
FLASH_DMA_MODE_MODE)
/* 512B flash cache in the NAND controller HW */
#define FC_SHIFT 9U
#define FC_BYTES 512U
#define FC_WORDS (FC_BYTES >> 2)
#define BRCMNAND_MIN_PAGESIZE 512
#define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
#define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
#define NAND_POLL_STATUS_TIMEOUT_MS 100
#define EDU_CMD_WRITE 0x00
#define EDU_CMD_READ 0x01
#define EDU_STATUS_ACTIVE BIT(0)
#define EDU_ERR_STATUS_ERRACK BIT(0)
#define EDU_DONE_MASK GENMASK(1, 0)
#define EDU_CONFIG_MODE_NAND BIT(0)
#define EDU_CONFIG_SWAP_BYTE BIT(1)
#ifdef CONFIG_CPU_BIG_ENDIAN
#define EDU_CONFIG_SWAP_CFG EDU_CONFIG_SWAP_BYTE
#else
#define EDU_CONFIG_SWAP_CFG 0
#endif
/* edu registers */
enum edu_reg {
EDU_CONFIG = 0,
EDU_DRAM_ADDR,
EDU_EXT_ADDR,
EDU_LENGTH,
EDU_CMD,
EDU_STOP,
EDU_STATUS,
EDU_DONE,
EDU_ERR_STATUS,
};
static const u16 edu_regs[] = {
[EDU_CONFIG] = 0x00,
[EDU_DRAM_ADDR] = 0x04,
[EDU_EXT_ADDR] = 0x08,
[EDU_LENGTH] = 0x0c,
[EDU_CMD] = 0x10,
[EDU_STOP] = 0x14,
[EDU_STATUS] = 0x18,
[EDU_DONE] = 0x1c,
[EDU_ERR_STATUS] = 0x20,
};
/* flash_dma registers */
enum flash_dma_reg {
FLASH_DMA_REVISION = 0,
FLASH_DMA_FIRST_DESC,
FLASH_DMA_FIRST_DESC_EXT,
FLASH_DMA_CTRL,
FLASH_DMA_MODE,
FLASH_DMA_STATUS,
FLASH_DMA_INTERRUPT_DESC,
FLASH_DMA_INTERRUPT_DESC_EXT,
FLASH_DMA_ERROR_STATUS,
FLASH_DMA_CURRENT_DESC,
FLASH_DMA_CURRENT_DESC_EXT,
};
/* flash_dma registers v0*/
static const u16 flash_dma_regs_v0[] = {
[FLASH_DMA_REVISION] = 0x00,
[FLASH_DMA_FIRST_DESC] = 0x04,
[FLASH_DMA_CTRL] = 0x08,
[FLASH_DMA_MODE] = 0x0c,
[FLASH_DMA_STATUS] = 0x10,
[FLASH_DMA_INTERRUPT_DESC] = 0x14,
[FLASH_DMA_ERROR_STATUS] = 0x18,
[FLASH_DMA_CURRENT_DESC] = 0x1c,
};
/* flash_dma registers v1*/
static const u16 flash_dma_regs_v1[] = {
[FLASH_DMA_REVISION] = 0x00,
[FLASH_DMA_FIRST_DESC] = 0x04,
[FLASH_DMA_FIRST_DESC_EXT] = 0x08,
[FLASH_DMA_CTRL] = 0x0c,
[FLASH_DMA_MODE] = 0x10,
[FLASH_DMA_STATUS] = 0x14,
[FLASH_DMA_INTERRUPT_DESC] = 0x18,
[FLASH_DMA_INTERRUPT_DESC_EXT] = 0x1c,
[FLASH_DMA_ERROR_STATUS] = 0x20,
[FLASH_DMA_CURRENT_DESC] = 0x24,
[FLASH_DMA_CURRENT_DESC_EXT] = 0x28,
};
/* flash_dma registers v4 */
static const u16 flash_dma_regs_v4[] = {
[FLASH_DMA_REVISION] = 0x00,
[FLASH_DMA_FIRST_DESC] = 0x08,
[FLASH_DMA_FIRST_DESC_EXT] = 0x0c,
[FLASH_DMA_CTRL] = 0x10,
[FLASH_DMA_MODE] = 0x14,
[FLASH_DMA_STATUS] = 0x18,
[FLASH_DMA_INTERRUPT_DESC] = 0x20,
[FLASH_DMA_INTERRUPT_DESC_EXT] = 0x24,
[FLASH_DMA_ERROR_STATUS] = 0x28,
[FLASH_DMA_CURRENT_DESC] = 0x30,
[FLASH_DMA_CURRENT_DESC_EXT] = 0x34,
};
/* Controller feature flags */
enum {
BRCMNAND_HAS_1K_SECTORS = BIT(0),
BRCMNAND_HAS_PREFETCH = BIT(1),
BRCMNAND_HAS_CACHE_MODE = BIT(2),
BRCMNAND_HAS_WP = BIT(3),
};
struct brcmnand_host;
static DEFINE_STATIC_KEY_FALSE(brcmnand_soc_has_ops_key);
struct brcmnand_controller {
struct device *dev;
struct nand_controller controller;
void __iomem *nand_base;
void __iomem *nand_fc; /* flash cache */
void __iomem *flash_dma_base;
int irq;
unsigned int dma_irq;
int nand_version;
/* Some SoCs provide custom interrupt status register(s) */
struct brcmnand_soc *soc;
/* Some SoCs have a gateable clock for the controller */
struct clk *clk;
int cmd_pending;
bool dma_pending;
bool edu_pending;
struct completion done;
struct completion dma_done;
struct completion edu_done;
/* List of NAND hosts (one for each chip-select) */
struct list_head host_list;
/* EDU info, per-transaction */
const u16 *edu_offsets;
void __iomem *edu_base;
int edu_irq;
int edu_count;
u64 edu_dram_addr;
u32 edu_ext_addr;
u32 edu_cmd;
u32 edu_config;
int sas; /* spare area size, per flash cache */
int sector_size_1k;
u8 *oob;
/* flash_dma reg */
const u16 *flash_dma_offsets;
struct brcm_nand_dma_desc *dma_desc;
dma_addr_t dma_pa;
int (*dma_trans)(struct brcmnand_host *host, u64 addr, u32 *buf,
u8 *oob, u32 len, u8 dma_cmd);
/* in-memory cache of the FLASH_CACHE, used only for some commands */
u8 flash_cache[FC_BYTES];
/* Controller revision details */
const u16 *reg_offsets;
unsigned int reg_spacing; /* between CS1, CS2, ... regs */
const u8 *cs_offsets; /* within each chip-select */
const u8 *cs0_offsets; /* within CS0, if different */
unsigned int max_block_size;
const unsigned int *block_sizes;
unsigned int max_page_size;
const unsigned int *page_sizes;
unsigned int page_size_shift;
unsigned int max_oob;
u32 ecc_level_shift;
u32 features;
/* for low-power standby/resume only */
u32 nand_cs_nand_select;
u32 nand_cs_nand_xor;
u32 corr_stat_threshold;
u32 flash_dma_mode;
u32 flash_edu_mode;
bool pio_poll_mode;
};
struct brcmnand_cfg {
u64 device_size;
unsigned int block_size;
unsigned int page_size;
unsigned int spare_area_size;
unsigned int device_width;
unsigned int col_adr_bytes;
unsigned int blk_adr_bytes;
unsigned int ful_adr_bytes;
unsigned int sector_size_1k;
unsigned int ecc_level;
/* used for low-power standby/resume only */
u32 acc_control;
u32 config;
u32 config_ext;
u32 timing_1;
u32 timing_2;
};
struct brcmnand_host {
struct list_head node;
struct nand_chip chip;
struct platform_device *pdev;
int cs;
unsigned int last_cmd;
unsigned int last_byte;
u64 last_addr;
struct brcmnand_cfg hwcfg;
struct brcmnand_controller *ctrl;
};
enum brcmnand_reg {
BRCMNAND_CMD_START = 0,
BRCMNAND_CMD_EXT_ADDRESS,
BRCMNAND_CMD_ADDRESS,
BRCMNAND_INTFC_STATUS,
BRCMNAND_CS_SELECT,
BRCMNAND_CS_XOR,
BRCMNAND_LL_OP,
BRCMNAND_CS0_BASE,
BRCMNAND_CS1_BASE, /* CS1 regs, if non-contiguous */
BRCMNAND_CORR_THRESHOLD,
BRCMNAND_CORR_THRESHOLD_EXT,
BRCMNAND_UNCORR_COUNT,
BRCMNAND_CORR_COUNT,
BRCMNAND_CORR_EXT_ADDR,
BRCMNAND_CORR_ADDR,
BRCMNAND_UNCORR_EXT_ADDR,
BRCMNAND_UNCORR_ADDR,
BRCMNAND_SEMAPHORE,
BRCMNAND_ID,
BRCMNAND_ID_EXT,
BRCMNAND_LL_RDATA,
BRCMNAND_OOB_READ_BASE,
BRCMNAND_OOB_READ_10_BASE, /* offset 0x10, if non-contiguous */
BRCMNAND_OOB_WRITE_BASE,
BRCMNAND_OOB_WRITE_10_BASE, /* offset 0x10, if non-contiguous */
BRCMNAND_FC_BASE,
};
/* BRCMNAND v2.1-v2.2 */
static const u16 brcmnand_regs_v21[] = {
[BRCMNAND_CMD_START] = 0x04,
[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
[BRCMNAND_CMD_ADDRESS] = 0x0c,
[BRCMNAND_INTFC_STATUS] = 0x5c,
[BRCMNAND_CS_SELECT] = 0x14,
[BRCMNAND_CS_XOR] = 0x18,
[BRCMNAND_LL_OP] = 0,
[BRCMNAND_CS0_BASE] = 0x40,
[BRCMNAND_CS1_BASE] = 0,
[BRCMNAND_CORR_THRESHOLD] = 0,
[BRCMNAND_CORR_THRESHOLD_EXT] = 0,
[BRCMNAND_UNCORR_COUNT] = 0,
[BRCMNAND_CORR_COUNT] = 0,
[BRCMNAND_CORR_EXT_ADDR] = 0x60,
[BRCMNAND_CORR_ADDR] = 0x64,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x68,
[BRCMNAND_UNCORR_ADDR] = 0x6c,
[BRCMNAND_SEMAPHORE] = 0x50,
[BRCMNAND_ID] = 0x54,
[BRCMNAND_ID_EXT] = 0,
[BRCMNAND_LL_RDATA] = 0,
[BRCMNAND_OOB_READ_BASE] = 0x20,
[BRCMNAND_OOB_READ_10_BASE] = 0,
[BRCMNAND_OOB_WRITE_BASE] = 0x30,
[BRCMNAND_OOB_WRITE_10_BASE] = 0,
[BRCMNAND_FC_BASE] = 0x200,
};
/* BRCMNAND v3.3-v4.0 */
static const u16 brcmnand_regs_v33[] = {
[BRCMNAND_CMD_START] = 0x04,
[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
[BRCMNAND_CMD_ADDRESS] = 0x0c,
[BRCMNAND_INTFC_STATUS] = 0x6c,
[BRCMNAND_CS_SELECT] = 0x14,
[BRCMNAND_CS_XOR] = 0x18,
[BRCMNAND_LL_OP] = 0x178,
[BRCMNAND_CS0_BASE] = 0x40,
[BRCMNAND_CS1_BASE] = 0xd0,
[BRCMNAND_CORR_THRESHOLD] = 0x84,
[BRCMNAND_CORR_THRESHOLD_EXT] = 0,
[BRCMNAND_UNCORR_COUNT] = 0,
[BRCMNAND_CORR_COUNT] = 0,
[BRCMNAND_CORR_EXT_ADDR] = 0x70,
[BRCMNAND_CORR_ADDR] = 0x74,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
[BRCMNAND_UNCORR_ADDR] = 0x7c,
[BRCMNAND_SEMAPHORE] = 0x58,
[BRCMNAND_ID] = 0x60,
[BRCMNAND_ID_EXT] = 0x64,
[BRCMNAND_LL_RDATA] = 0x17c,
[BRCMNAND_OOB_READ_BASE] = 0x20,
[BRCMNAND_OOB_READ_10_BASE] = 0x130,
[BRCMNAND_OOB_WRITE_BASE] = 0x30,
[BRCMNAND_OOB_WRITE_10_BASE] = 0,
[BRCMNAND_FC_BASE] = 0x200,
};
/* BRCMNAND v5.0 */
static const u16 brcmnand_regs_v50[] = {
[BRCMNAND_CMD_START] = 0x04,
[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
[BRCMNAND_CMD_ADDRESS] = 0x0c,
[BRCMNAND_INTFC_STATUS] = 0x6c,
[BRCMNAND_CS_SELECT] = 0x14,
[BRCMNAND_CS_XOR] = 0x18,
[BRCMNAND_LL_OP] = 0x178,
[BRCMNAND_CS0_BASE] = 0x40,
[BRCMNAND_CS1_BASE] = 0xd0,
[BRCMNAND_CORR_THRESHOLD] = 0x84,
[BRCMNAND_CORR_THRESHOLD_EXT] = 0,
[BRCMNAND_UNCORR_COUNT] = 0,
[BRCMNAND_CORR_COUNT] = 0,
[BRCMNAND_CORR_EXT_ADDR] = 0x70,
[BRCMNAND_CORR_ADDR] = 0x74,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
[BRCMNAND_UNCORR_ADDR] = 0x7c,
[BRCMNAND_SEMAPHORE] = 0x58,
[BRCMNAND_ID] = 0x60,
[BRCMNAND_ID_EXT] = 0x64,
[BRCMNAND_LL_RDATA] = 0x17c,
[BRCMNAND_OOB_READ_BASE] = 0x20,
[BRCMNAND_OOB_READ_10_BASE] = 0x130,
[BRCMNAND_OOB_WRITE_BASE] = 0x30,
[BRCMNAND_OOB_WRITE_10_BASE] = 0x140,
[BRCMNAND_FC_BASE] = 0x200,
};
/* BRCMNAND v6.0 - v7.1 */
static const u16 brcmnand_regs_v60[] = {
[BRCMNAND_CMD_START] = 0x04,
[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
[BRCMNAND_CMD_ADDRESS] = 0x0c,
[BRCMNAND_INTFC_STATUS] = 0x14,
[BRCMNAND_CS_SELECT] = 0x18,
[BRCMNAND_CS_XOR] = 0x1c,
[BRCMNAND_LL_OP] = 0x20,
[BRCMNAND_CS0_BASE] = 0x50,
[BRCMNAND_CS1_BASE] = 0,
[BRCMNAND_CORR_THRESHOLD] = 0xc0,
[BRCMNAND_CORR_THRESHOLD_EXT] = 0xc4,
[BRCMNAND_UNCORR_COUNT] = 0xfc,
[BRCMNAND_CORR_COUNT] = 0x100,
[BRCMNAND_CORR_EXT_ADDR] = 0x10c,
[BRCMNAND_CORR_ADDR] = 0x110,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
[BRCMNAND_UNCORR_ADDR] = 0x118,
[BRCMNAND_SEMAPHORE] = 0x150,
[BRCMNAND_ID] = 0x194,
[BRCMNAND_ID_EXT] = 0x198,
[BRCMNAND_LL_RDATA] = 0x19c,
[BRCMNAND_OOB_READ_BASE] = 0x200,
[BRCMNAND_OOB_READ_10_BASE] = 0,
[BRCMNAND_OOB_WRITE_BASE] = 0x280,
[BRCMNAND_OOB_WRITE_10_BASE] = 0,
[BRCMNAND_FC_BASE] = 0x400,
};
/* BRCMNAND v7.1 */
static const u16 brcmnand_regs_v71[] = {
[BRCMNAND_CMD_START] = 0x04,
[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
[BRCMNAND_CMD_ADDRESS] = 0x0c,
[BRCMNAND_INTFC_STATUS] = 0x14,
[BRCMNAND_CS_SELECT] = 0x18,
[BRCMNAND_CS_XOR] = 0x1c,
[BRCMNAND_LL_OP] = 0x20,
[BRCMNAND_CS0_BASE] = 0x50,
[BRCMNAND_CS1_BASE] = 0,
[BRCMNAND_CORR_THRESHOLD] = 0xdc,
[BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
[BRCMNAND_UNCORR_COUNT] = 0xfc,
[BRCMNAND_CORR_COUNT] = 0x100,
[BRCMNAND_CORR_EXT_ADDR] = 0x10c,
[BRCMNAND_CORR_ADDR] = 0x110,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
[BRCMNAND_UNCORR_ADDR] = 0x118,
[BRCMNAND_SEMAPHORE] = 0x150,
[BRCMNAND_ID] = 0x194,
[BRCMNAND_ID_EXT] = 0x198,
[BRCMNAND_LL_RDATA] = 0x19c,
[BRCMNAND_OOB_READ_BASE] = 0x200,
[BRCMNAND_OOB_READ_10_BASE] = 0,
[BRCMNAND_OOB_WRITE_BASE] = 0x280,
[BRCMNAND_OOB_WRITE_10_BASE] = 0,
[BRCMNAND_FC_BASE] = 0x400,
};
/* BRCMNAND v7.2 */
static const u16 brcmnand_regs_v72[] = {
[BRCMNAND_CMD_START] = 0x04,
[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
[BRCMNAND_CMD_ADDRESS] = 0x0c,
[BRCMNAND_INTFC_STATUS] = 0x14,
[BRCMNAND_CS_SELECT] = 0x18,
[BRCMNAND_CS_XOR] = 0x1c,
[BRCMNAND_LL_OP] = 0x20,
[BRCMNAND_CS0_BASE] = 0x50,
[BRCMNAND_CS1_BASE] = 0,
[BRCMNAND_CORR_THRESHOLD] = 0xdc,
[BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
[BRCMNAND_UNCORR_COUNT] = 0xfc,
[BRCMNAND_CORR_COUNT] = 0x100,
[BRCMNAND_CORR_EXT_ADDR] = 0x10c,
[BRCMNAND_CORR_ADDR] = 0x110,
[BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
[BRCMNAND_UNCORR_ADDR] = 0x118,
[BRCMNAND_SEMAPHORE] = 0x150,
[BRCMNAND_ID] = 0x194,
[BRCMNAND_ID_EXT] = 0x198,
[BRCMNAND_LL_RDATA] = 0x19c,
[BRCMNAND_OOB_READ_BASE] = 0x200,
[BRCMNAND_OOB_READ_10_BASE] = 0,
[BRCMNAND_OOB_WRITE_BASE] = 0x400,
[BRCMNAND_OOB_WRITE_10_BASE] = 0,
[BRCMNAND_FC_BASE] = 0x600,
};
enum brcmnand_cs_reg {
BRCMNAND_CS_CFG_EXT = 0,
BRCMNAND_CS_CFG,
BRCMNAND_CS_ACC_CONTROL,
BRCMNAND_CS_TIMING1,
BRCMNAND_CS_TIMING2,
};
/* Per chip-select offsets for v7.1 */
static const u8 brcmnand_cs_offsets_v71[] = {
[BRCMNAND_CS_ACC_CONTROL] = 0x00,
[BRCMNAND_CS_CFG_EXT] = 0x04,
[BRCMNAND_CS_CFG] = 0x08,
[BRCMNAND_CS_TIMING1] = 0x0c,
[BRCMNAND_CS_TIMING2] = 0x10,
};
/* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
static const u8 brcmnand_cs_offsets[] = {
[BRCMNAND_CS_ACC_CONTROL] = 0x00,
[BRCMNAND_CS_CFG_EXT] = 0x04,
[BRCMNAND_CS_CFG] = 0x04,
[BRCMNAND_CS_TIMING1] = 0x08,
[BRCMNAND_CS_TIMING2] = 0x0c,
};
/* Per chip-select offset for <= v5.0 on CS0 only */
static const u8 brcmnand_cs_offsets_cs0[] = {
[BRCMNAND_CS_ACC_CONTROL] = 0x00,
[BRCMNAND_CS_CFG_EXT] = 0x08,
[BRCMNAND_CS_CFG] = 0x08,
[BRCMNAND_CS_TIMING1] = 0x10,
[BRCMNAND_CS_TIMING2] = 0x14,
};
/*
* Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
* one config register, but once the bitfields overflowed, newer controllers
* (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around.
*/
enum {
CFG_BLK_ADR_BYTES_SHIFT = 8,
CFG_COL_ADR_BYTES_SHIFT = 12,
CFG_FUL_ADR_BYTES_SHIFT = 16,
CFG_BUS_WIDTH_SHIFT = 23,
CFG_BUS_WIDTH = BIT(CFG_BUS_WIDTH_SHIFT),
CFG_DEVICE_SIZE_SHIFT = 24,
/* Only for v2.1 */
CFG_PAGE_SIZE_SHIFT_v2_1 = 30,
/* Only for pre-v7.1 (with no CFG_EXT register) */
CFG_PAGE_SIZE_SHIFT = 20,
CFG_BLK_SIZE_SHIFT = 28,
/* Only for v7.1+ (with CFG_EXT register) */
CFG_EXT_PAGE_SIZE_SHIFT = 0,
CFG_EXT_BLK_SIZE_SHIFT = 4,
};
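/*
 * Illustrative sketch (not part of the driver): given a CONFIG word read
 * back from the controller, the data-bus width can be recovered from the
 * CFG_BUS_WIDTH bit, which the driver programs for 16-bit devices.
 */
static inline unsigned int brcmnand_cfg_bus_width(u32 config)
{
	return (config & CFG_BUS_WIDTH) ? 16 : 8;
}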
/* BRCMNAND_INTFC_STATUS */
enum {
INTFC_FLASH_STATUS = GENMASK(7, 0),
INTFC_ERASED = BIT(27),
INTFC_OOB_VALID = BIT(28),
INTFC_CACHE_VALID = BIT(29),
INTFC_FLASH_READY = BIT(30),
INTFC_CTLR_READY = BIT(31),
};
/***********************************************************************
* NAND ACC CONTROL bitfield
*
* Some bits have remained constant throughout hardware revision, while
* others have shifted around.
***********************************************************************/
/* Constant for all versions (where supported) */
enum {
/* See BRCMNAND_HAS_CACHE_MODE */
ACC_CONTROL_CACHE_MODE = BIT(22),
/* See BRCMNAND_HAS_PREFETCH */
ACC_CONTROL_PREFETCH = BIT(23),
ACC_CONTROL_PAGE_HIT = BIT(24),
ACC_CONTROL_WR_PREEMPT = BIT(25),
ACC_CONTROL_PARTIAL_PAGE = BIT(26),
ACC_CONTROL_RD_ERASED = BIT(27),
ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
ACC_CONTROL_WR_ECC = BIT(30),
ACC_CONTROL_RD_ECC = BIT(31),
};
#define ACC_CONTROL_ECC_SHIFT 16
/* Only for v7.2 */
#define ACC_CONTROL_ECC_EXT_SHIFT 13
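/*
 * Placement example: brcmnand_set_ecc_enabled() below programs the ECC level
 * as (ecc_level << ecc_level_shift), so e.g. an ECC level of 8 is written as
 * 8 << 13 on a v7.2 controller and as 8 << 16 on earlier controllers.
 */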
static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
{
#if IS_ENABLED(CONFIG_MTD_NAND_BRCMNAND_BCMA)
return static_branch_unlikely(&brcmnand_soc_has_ops_key);
#else
return false;
#endif
}
static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
{
if (brcmnand_non_mmio_ops(ctrl))
return brcmnand_soc_read(ctrl->soc, offs);
return brcmnand_readl(ctrl->nand_base + offs);
}
static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
u32 val)
{
if (brcmnand_non_mmio_ops(ctrl))
brcmnand_soc_write(ctrl->soc, val, offs);
else
brcmnand_writel(val, ctrl->nand_base + offs);
}
static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
{
static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
static const unsigned int block_sizes_v2_2[] = { 16, 128, 8, 512, 256, 0 };
static const unsigned int block_sizes_v2_1[] = { 16, 128, 8, 512, 0 };
static const unsigned int page_sizes_v3_4[] = { 512, 2048, 4096, 8192, 0 };
static const unsigned int page_sizes_v2_2[] = { 512, 2048, 4096, 0 };
static const unsigned int page_sizes_v2_1[] = { 512, 2048, 0 };
ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;
/* Only support v2.1+ */
if (ctrl->nand_version < 0x0201) {
dev_err(ctrl->dev, "version %#x not supported\n",
ctrl->nand_version);
return -ENODEV;
}
/* Register offsets */
if (ctrl->nand_version >= 0x0702)
ctrl->reg_offsets = brcmnand_regs_v72;
else if (ctrl->nand_version == 0x0701)
ctrl->reg_offsets = brcmnand_regs_v71;
else if (ctrl->nand_version >= 0x0600)
ctrl->reg_offsets = brcmnand_regs_v60;
else if (ctrl->nand_version >= 0x0500)
ctrl->reg_offsets = brcmnand_regs_v50;
else if (ctrl->nand_version >= 0x0303)
ctrl->reg_offsets = brcmnand_regs_v33;
else if (ctrl->nand_version >= 0x0201)
ctrl->reg_offsets = brcmnand_regs_v21;
/* Chip-select stride */
if (ctrl->nand_version >= 0x0701)
ctrl->reg_spacing = 0x14;
else
ctrl->reg_spacing = 0x10;
/* Per chip-select registers */
if (ctrl->nand_version >= 0x0701) {
ctrl->cs_offsets = brcmnand_cs_offsets_v71;
} else {
ctrl->cs_offsets = brcmnand_cs_offsets;
/* v3.3-5.0 have a different CS0 offset layout */
if (ctrl->nand_version >= 0x0303 &&
ctrl->nand_version <= 0x0500)
ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
}
/* Page / block sizes */
if (ctrl->nand_version >= 0x0701) {
/* >= v7.1 use nice power-of-2 values! */
ctrl->max_page_size = 16 * 1024;
ctrl->max_block_size = 2 * 1024 * 1024;
} else {
if (ctrl->nand_version >= 0x0304)
ctrl->page_sizes = page_sizes_v3_4;
else if (ctrl->nand_version >= 0x0202)
ctrl->page_sizes = page_sizes_v2_2;
else
ctrl->page_sizes = page_sizes_v2_1;
if (ctrl->nand_version >= 0x0202)
ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT;
else
ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT_v2_1;
if (ctrl->nand_version >= 0x0600)
ctrl->block_sizes = block_sizes_v6;
else if (ctrl->nand_version >= 0x0400)
ctrl->block_sizes = block_sizes_v4;
else if (ctrl->nand_version >= 0x0202)
ctrl->block_sizes = block_sizes_v2_2;
else
ctrl->block_sizes = block_sizes_v2_1;
if (ctrl->nand_version < 0x0400) {
if (ctrl->nand_version < 0x0202)
ctrl->max_page_size = 2048;
else
ctrl->max_page_size = 4096;
ctrl->max_block_size = 512 * 1024;
}
}
/* Maximum spare area sector size (per 512B) */
if (ctrl->nand_version == 0x0702)
ctrl->max_oob = 128;
else if (ctrl->nand_version >= 0x0600)
ctrl->max_oob = 64;
else if (ctrl->nand_version >= 0x0500)
ctrl->max_oob = 32;
else
ctrl->max_oob = 16;
/* v6.0 and newer (except v6.1) have prefetch support */
if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601)
ctrl->features |= BRCMNAND_HAS_PREFETCH;
/*
* v6.x has cache mode, but it's implemented differently. Ignore it for
* now.
*/
if (ctrl->nand_version >= 0x0700)
ctrl->features |= BRCMNAND_HAS_CACHE_MODE;
if (ctrl->nand_version >= 0x0500)
ctrl->features |= BRCMNAND_HAS_1K_SECTORS;
if (ctrl->nand_version >= 0x0700)
ctrl->features |= BRCMNAND_HAS_WP;
else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
ctrl->features |= BRCMNAND_HAS_WP;
/* v7.2 has different ecc level shift in the acc register */
if (ctrl->nand_version == 0x0702)
ctrl->ecc_level_shift = ACC_CONTROL_ECC_EXT_SHIFT;
else
ctrl->ecc_level_shift = ACC_CONTROL_ECC_SHIFT;
return 0;
}
static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl)
{
/* flash_dma register offsets */
if (ctrl->nand_version >= 0x0703)
ctrl->flash_dma_offsets = flash_dma_regs_v4;
else if (ctrl->nand_version == 0x0602)
ctrl->flash_dma_offsets = flash_dma_regs_v0;
else
ctrl->flash_dma_offsets = flash_dma_regs_v1;
}
static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
enum brcmnand_reg reg)
{
u16 offs = ctrl->reg_offsets[reg];
if (offs)
return nand_readreg(ctrl, offs);
else
return 0;
}
static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl,
enum brcmnand_reg reg, u32 val)
{
u16 offs = ctrl->reg_offsets[reg];
if (offs)
nand_writereg(ctrl, offs, val);
}
static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
enum brcmnand_reg reg, u32 mask, unsigned
int shift, u32 val)
{
u32 tmp = brcmnand_read_reg(ctrl, reg);
tmp &= ~mask;
tmp |= val << shift;
brcmnand_write_reg(ctrl, reg, tmp);
}
static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
{
if (brcmnand_non_mmio_ops(ctrl))
return brcmnand_soc_read(ctrl->soc, BRCMNAND_NON_MMIO_FC_ADDR);
return __raw_readl(ctrl->nand_fc + word * 4);
}
static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
int word, u32 val)
{
if (brcmnand_non_mmio_ops(ctrl))
brcmnand_soc_write(ctrl->soc, val, BRCMNAND_NON_MMIO_FC_ADDR);
else
__raw_writel(val, ctrl->nand_fc + word * 4);
}
static inline void edu_writel(struct brcmnand_controller *ctrl,
enum edu_reg reg, u32 val)
{
u16 offs = ctrl->edu_offsets[reg];
brcmnand_writel(val, ctrl->edu_base + offs);
}
static inline u32 edu_readl(struct brcmnand_controller *ctrl,
enum edu_reg reg)
{
u16 offs = ctrl->edu_offsets[reg];
return brcmnand_readl(ctrl->edu_base + offs);
}
static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
{
/* Clear error addresses */
brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
}
static u64 brcmnand_get_uncorrecc_addr(struct brcmnand_controller *ctrl)
{
u64 err_addr;
err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR);
err_addr |= ((u64)(brcmnand_read_reg(ctrl,
BRCMNAND_UNCORR_EXT_ADDR)
& 0xffff) << 32);
return err_addr;
}
static u64 brcmnand_get_correcc_addr(struct brcmnand_controller *ctrl)
{
u64 err_addr;
err_addr = brcmnand_read_reg(ctrl, BRCMNAND_CORR_ADDR);
err_addr |= ((u64)(brcmnand_read_reg(ctrl,
BRCMNAND_CORR_EXT_ADDR)
& 0xffff) << 32);
return err_addr;
}
static void brcmnand_set_cmd_addr(struct mtd_info *mtd, u64 addr)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
(host->cs << 16) | ((addr >> 32) & 0xffff));
(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
lower_32_bits(addr));
(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
}
static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
enum brcmnand_cs_reg reg)
{
u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE];
u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE];
u8 cs_offs;
if (cs == 0 && ctrl->cs0_offsets)
cs_offs = ctrl->cs0_offsets[reg];
else
cs_offs = ctrl->cs_offsets[reg];
if (cs && offs_cs1)
return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;
return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
}
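/*
 * Address computation example (derived from the tables above): on a v7.1
 * controller, ACC_CONTROL for CS1 resolves to CS0_BASE (0x50) +
 * 1 * reg_spacing (0x14) + 0x00 = 0x64, since BRCMNAND_CS1_BASE is 0 there.
 */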
static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
{
if (ctrl->nand_version < 0x0600)
return 1;
return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
}
static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
{
struct brcmnand_controller *ctrl = host->ctrl;
unsigned int shift = 0, bits;
enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
int cs = host->cs;
if (!ctrl->reg_offsets[reg])
return;
if (ctrl->nand_version == 0x0702)
bits = 7;
else if (ctrl->nand_version >= 0x0600)
bits = 6;
else if (ctrl->nand_version >= 0x0500)
bits = 5;
else
bits = 4;
if (ctrl->nand_version >= 0x0702) {
if (cs >= 4)
reg = BRCMNAND_CORR_THRESHOLD_EXT;
shift = (cs % 4) * bits;
} else if (ctrl->nand_version >= 0x0600) {
if (cs >= 5)
reg = BRCMNAND_CORR_THRESHOLD_EXT;
shift = (cs % 5) * bits;
}
brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val);
}
static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
{
/* Kludge for the BCMA-based NAND controller which does not actually
* shift the command
*/
if (ctrl->nand_version == 0x0304 && brcmnand_non_mmio_ops(ctrl))
return 0;
if (ctrl->nand_version < 0x0602)
return 24;
return 0;
}
static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
{
if (ctrl->nand_version == 0x0702)
return GENMASK(7, 0);
else if (ctrl->nand_version >= 0x0600)
return GENMASK(6, 0);
else if (ctrl->nand_version >= 0x0303)
return GENMASK(5, 0);
else
return GENMASK(4, 0);
}
static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
{
u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
mask <<= ACC_CONTROL_ECC_SHIFT;
/* v7.2 includes additional ECC levels */
if (ctrl->nand_version == 0x0702)
mask |= 0x7 << ACC_CONTROL_ECC_EXT_SHIFT;
return mask;
}
static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
{
struct brcmnand_controller *ctrl = host->ctrl;
u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
u32 acc_control = nand_readreg(ctrl, offs);
u32 ecc_flags = ACC_CONTROL_WR_ECC | ACC_CONTROL_RD_ECC;
if (en) {
acc_control |= ecc_flags; /* enable RD/WR ECC */
acc_control &= ~brcmnand_ecc_level_mask(ctrl);
acc_control |= host->hwcfg.ecc_level << ctrl->ecc_level_shift;
} else {
acc_control &= ~ecc_flags; /* disable RD/WR ECC */
acc_control &= ~brcmnand_ecc_level_mask(ctrl);
}
nand_writereg(ctrl, offs, acc_control);
}
static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
{
if (ctrl->nand_version >= 0x0702)
return 9;
else if (ctrl->nand_version >= 0x0600)
return 7;
else if (ctrl->nand_version >= 0x0500)
return 6;
else
return -1;
}
static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
{
struct brcmnand_controller *ctrl = host->ctrl;
int shift = brcmnand_sector_1k_shift(ctrl);
u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
BRCMNAND_CS_ACC_CONTROL);
if (shift < 0)
return 0;
return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
}
static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
{
struct brcmnand_controller *ctrl = host->ctrl;
int shift = brcmnand_sector_1k_shift(ctrl);
u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
BRCMNAND_CS_ACC_CONTROL);
u32 tmp;
if (shift < 0)
return;
tmp = nand_readreg(ctrl, acc_control_offs);
tmp &= ~(1 << shift);
tmp |= (!!val) << shift;
nand_writereg(ctrl, acc_control_offs, tmp);
}
/***********************************************************************
* CS_NAND_SELECT
***********************************************************************/
enum {
CS_SELECT_NAND_WP = BIT(29),
CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30),
};
static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
u32 mask, u32 expected_val,
unsigned long timeout_ms)
{
unsigned long limit;
u32 val;
if (!timeout_ms)
timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS;
limit = jiffies + msecs_to_jiffies(timeout_ms);
do {
val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
if ((val & mask) == expected_val)
return 0;
cpu_relax();
} while (time_after(limit, jiffies));
/*
 * Do a final check after the timeout in case the CPU was busy and the
 * driver did not get enough time to perform the polling, to avoid false
 * alarms.
 */
val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
if ((val & mask) == expected_val)
return 0;
dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
expected_val, val & mask);
return -ETIMEDOUT;
}
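/*
 * Typical use, as in brcmnand_send_cmd() below: poll with mask and expected
 * value both set to NAND_CTRL_RDY and timeout_ms = 0 to wait (up to the
 * default 100 ms) for both the controller and the flash to report ready.
 */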
static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
{
u32 val = en ? CS_SELECT_NAND_WP : 0;
brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val);
}
/***********************************************************************
* Flash DMA
***********************************************************************/
static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
{
return ctrl->flash_dma_base;
}
static inline bool has_edu(struct brcmnand_controller *ctrl)
{
return ctrl->edu_base;
}
static inline bool use_dma(struct brcmnand_controller *ctrl)
{
return has_flash_dma(ctrl) || has_edu(ctrl);
}
static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
{
if (ctrl->pio_poll_mode)
return;
if (has_flash_dma(ctrl)) {
ctrl->flash_dma_base = NULL;
disable_irq(ctrl->dma_irq);
}
disable_irq(ctrl->irq);
ctrl->pio_poll_mode = true;
}
static inline bool flash_dma_buf_ok(const void *buf)
{
return buf && !is_vmalloc_addr(buf) &&
likely(IS_ALIGNED((uintptr_t)buf, 4));
}
static inline void flash_dma_writel(struct brcmnand_controller *ctrl,
enum flash_dma_reg dma_reg, u32 val)
{
u16 offs = ctrl->flash_dma_offsets[dma_reg];
brcmnand_writel(val, ctrl->flash_dma_base + offs);
}
static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl,
enum flash_dma_reg dma_reg)
{
u16 offs = ctrl->flash_dma_offsets[dma_reg];
return brcmnand_readl(ctrl->flash_dma_base + offs);
}
/* Low-level operation types: command, address, write, or read */
enum brcmnand_llop_type {
LL_OP_CMD,
LL_OP_ADDR,
LL_OP_WR,
LL_OP_RD,
};
/***********************************************************************
* Internal support functions
***********************************************************************/
static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl,
struct brcmnand_cfg *cfg)
{
if (ctrl->nand_version <= 0x0701)
return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
cfg->ecc_level == 15;
else
return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 &&
cfg->ecc_level == 15) ||
(cfg->spare_area_size == 28 && cfg->ecc_level == 16));
}
/*
 * The helpers below describe the ECC and free OOB regions;
 * brcmstb_choose_ecc_layout() picks the appropriate mtd_ooblayout_ops for
 * the current configuration and returns a negative error code on failure.
 */
static int brcmnand_hamming_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_cfg *cfg = &host->hwcfg;
int sas = cfg->spare_area_size << cfg->sector_size_1k;
int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
if (section >= sectors)
return -ERANGE;
oobregion->offset = (section * sas) + 6;
oobregion->length = 3;
return 0;
}
static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_cfg *cfg = &host->hwcfg;
int sas = cfg->spare_area_size << cfg->sector_size_1k;
int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
u32 next;
if (section > sectors)
return -ERANGE;
next = (section * sas);
if (section < sectors)
next += 6;
if (section) {
oobregion->offset = ((section - 1) * sas) + 9;
} else {
if (cfg->page_size > 512) {
/* Large page NAND uses first 2 bytes for BBI */
oobregion->offset = 2;
} else {
/* Small page NAND uses last byte before ECC for BBI */
oobregion->offset = 0;
next--;
}
}
oobregion->length = next - oobregion->offset;
return 0;
}
static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops = {
.ecc = brcmnand_hamming_ooblayout_ecc,
.free = brcmnand_hamming_ooblayout_free,
};
static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_cfg *cfg = &host->hwcfg;
int sas = cfg->spare_area_size << cfg->sector_size_1k;
int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
if (section >= sectors)
return -ERANGE;
oobregion->offset = ((section + 1) * sas) - chip->ecc.bytes;
oobregion->length = chip->ecc.bytes;
return 0;
}
static int brcmnand_bch_ooblayout_free_lp(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_cfg *cfg = &host->hwcfg;
int sas = cfg->spare_area_size << cfg->sector_size_1k;
int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
if (section >= sectors)
return -ERANGE;
if (sas <= chip->ecc.bytes)
return 0;
oobregion->offset = section * sas;
oobregion->length = sas - chip->ecc.bytes;
if (!section) {
oobregion->offset++;
oobregion->length--;
}
return 0;
}
static int brcmnand_bch_ooblayout_free_sp(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_cfg *cfg = &host->hwcfg;
int sas = cfg->spare_area_size << cfg->sector_size_1k;
if (section > 1 || sas - chip->ecc.bytes < 6 ||
(section && sas - chip->ecc.bytes == 6))
return -ERANGE;
if (!section) {
oobregion->offset = 0;
oobregion->length = 5;
} else {
oobregion->offset = 6;
oobregion->length = sas - chip->ecc.bytes - 6;
}
return 0;
}
static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops = {
.ecc = brcmnand_bch_ooblayout_ecc,
.free = brcmnand_bch_ooblayout_free_lp,
};
static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops = {
.ecc = brcmnand_bch_ooblayout_ecc,
.free = brcmnand_bch_ooblayout_free_sp,
};
static int brcmstb_choose_ecc_layout(struct brcmnand_host *host)
{
struct brcmnand_cfg *p = &host->hwcfg;
struct mtd_info *mtd = nand_to_mtd(&host->chip);
struct nand_ecc_ctrl *ecc = &host->chip.ecc;
unsigned int ecc_level = p->ecc_level;
int sas = p->spare_area_size << p->sector_size_1k;
int sectors = p->page_size / (512 << p->sector_size_1k);
if (p->sector_size_1k)
ecc_level <<= 1;
if (is_hamming_ecc(host->ctrl, p)) {
ecc->bytes = 3 * sectors;
mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops);
return 0;
}
/*
* CONTROLLER_VERSION:
* < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
* >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
 * We just use the conservative (larger) value everywhere.
*/
ecc->bytes = DIV_ROUND_UP(ecc_level * 14, 8);
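	/*
	 * Worked example: BCH-4 over 512-byte sectors needs
	 * DIV_ROUND_UP(4 * 14, 8) = 7 ECC bytes per sector; with 1 KiB
	 * sectors the doubled ecc_level (BCH-8) gives
	 * DIV_ROUND_UP(8 * 14, 8) = 14 bytes.
	 */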
if (p->page_size == 512)
mtd_set_ooblayout(mtd, &brcmnand_bch_sp_ooblayout_ops);
else
mtd_set_ooblayout(mtd, &brcmnand_bch_lp_ooblayout_ops);
if (ecc->bytes >= sas) {
dev_err(&host->pdev->dev,
"error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
ecc->bytes, sas);
return -EINVAL;
}
return 0;
}
static void brcmnand_wp(struct mtd_info *mtd, int wp)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
static int old_wp = -1;
int ret;
if (old_wp != wp) {
dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
old_wp = wp;
}
/*
* make sure ctrl/flash ready before and after
* changing state of #WP pin
*/
ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
NAND_STATUS_READY,
NAND_CTRL_RDY |
NAND_STATUS_READY, 0);
if (ret)
return;
brcmnand_set_wp(ctrl, wp);
nand_status_op(chip, NULL);
/* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
ret = bcmnand_ctrl_poll_status(ctrl,
NAND_CTRL_RDY |
NAND_STATUS_READY |
NAND_STATUS_WP,
NAND_CTRL_RDY |
NAND_STATUS_READY |
(wp ? 0 : NAND_STATUS_WP), 0);
if (ret)
dev_err_ratelimited(&host->pdev->dev,
"nand #WP expected %s\n",
wp ? "on" : "off");
}
}
/* Helper functions for reading and writing OOB registers */
static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs)
{
u16 offset0, offset10, reg_offs;
offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE];
offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE];
if (offs >= ctrl->max_oob)
return 0x77;
if (offs >= 16 && offset10)
reg_offs = offset10 + ((offs - 0x10) & ~0x03);
else
reg_offs = offset0 + (offs & ~0x03);
return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3));
}
static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs,
u32 data)
{
u16 offset0, offset10, reg_offs;
offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE];
offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE];
if (offs >= ctrl->max_oob)
return;
if (offs >= 16 && offset10)
reg_offs = offset10 + ((offs - 0x10) & ~0x03);
else
reg_offs = offset0 + (offs & ~0x03);
nand_writereg(ctrl, reg_offs, data);
}
/*
* read_oob_from_regs - read data from OOB registers
* @ctrl: NAND controller
* @i: sub-page sector index
* @oob: buffer to read to
* @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
* @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
*/
static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob,
int sas, int sector_1k)
{
int tbytes = sas << sector_1k;
int j;
/* Adjust OOB values for 1K sector size */
if (sector_1k && (i & 0x01))
tbytes = max(0, tbytes - (int)ctrl->max_oob);
tbytes = min_t(int, tbytes, ctrl->max_oob);
for (j = 0; j < tbytes; j++)
oob[j] = oob_reg_read(ctrl, j);
return tbytes;
}
/*
 * write_oob_to_regs - write data to OOB registers
 * @ctrl: NAND controller
* @i: sub-page sector index
* @oob: buffer to write from
* @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
* @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
*/
static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
const u8 *oob, int sas, int sector_1k)
{
int tbytes = sas << sector_1k;
int j, k = 0;
u32 last = 0xffffffff;
u8 *plast = (u8 *)&last;
/* Adjust OOB values for 1K sector size */
if (sector_1k && (i & 0x01))
tbytes = max(0, tbytes - (int)ctrl->max_oob);
tbytes = min_t(int, tbytes, ctrl->max_oob);
/*
 * tbytes may not be a multiple of 4. Write full words first, then merge
 * the remaining bytes into a padded last word so we never read past the
 * end of the OOB buffer.
 */
for (j = 0; (j + 3) < tbytes; j += 4)
oob_reg_write(ctrl, j,
(oob[j + 0] << 24) |
(oob[j + 1] << 16) |
(oob[j + 2] << 8) |
(oob[j + 3] << 0));
/* handle the remaining bytes */
while (j < tbytes)
plast[k++] = oob[j++];
if (tbytes & 0x3)
oob_reg_write(ctrl, (tbytes & ~0x3), (__force u32)cpu_to_be32(last));
return tbytes;
}
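/*
 * Trailing-byte illustration (assumed spare-area size, for illustration
 * only): with sas = 27 and 512-byte sectors, tbytes is 27, so the word loop
 * in write_oob_to_regs() above writes bytes 0..23 as full words, the last
 * three bytes are merged into a 0xff-padded word, and that word is written
 * at offset 24.
 */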
static void brcmnand_edu_init(struct brcmnand_controller *ctrl)
{
/* initialize edu */
edu_writel(ctrl, EDU_ERR_STATUS, 0);
edu_readl(ctrl, EDU_ERR_STATUS);
edu_writel(ctrl, EDU_DONE, 0);
edu_writel(ctrl, EDU_DONE, 0);
edu_writel(ctrl, EDU_DONE, 0);
edu_writel(ctrl, EDU_DONE, 0);
edu_readl(ctrl, EDU_DONE);
}
/* edu irq */
static irqreturn_t brcmnand_edu_irq(int irq, void *data)
{
struct brcmnand_controller *ctrl = data;
if (ctrl->edu_count) {
ctrl->edu_count--;
while (!(edu_readl(ctrl, EDU_DONE) & EDU_DONE_MASK))
udelay(1);
edu_writel(ctrl, EDU_DONE, 0);
edu_readl(ctrl, EDU_DONE);
}
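	/*
	 * More chunks remain: advance the DRAM and flash addresses by one
	 * FLASH_CACHE (FC_BYTES) step, handle the matching OOB bytes, and
	 * re-arm the EDU for the next transfer.
	 */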
if (ctrl->edu_count) {
ctrl->edu_dram_addr += FC_BYTES;
ctrl->edu_ext_addr += FC_BYTES;
edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
edu_readl(ctrl, EDU_DRAM_ADDR);
edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
edu_readl(ctrl, EDU_EXT_ADDR);
if (ctrl->oob) {
if (ctrl->edu_cmd == EDU_CMD_READ) {
ctrl->oob += read_oob_from_regs(ctrl,
ctrl->edu_count + 1,
ctrl->oob, ctrl->sas,
ctrl->sector_size_1k);
} else {
brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
ctrl->edu_ext_addr);
brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
ctrl->oob += write_oob_to_regs(ctrl,
ctrl->edu_count,
ctrl->oob, ctrl->sas,
ctrl->sector_size_1k);
}
}
mb(); /* flush previous writes */
edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
edu_readl(ctrl, EDU_CMD);
return IRQ_HANDLED;
}
complete(&ctrl->edu_done);
return IRQ_HANDLED;
}
static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
{
struct brcmnand_controller *ctrl = data;
/* Discard all NAND_CTLRDY interrupts during DMA */
if (ctrl->dma_pending)
return IRQ_HANDLED;
	/* check if we need to piggyback on the ctlrdy irq */
if (ctrl->edu_pending) {
if (irq == ctrl->irq && ((int)ctrl->edu_irq >= 0))
/* Discard interrupts while using dedicated edu irq */
return IRQ_HANDLED;
/* no registered edu irq, call handler */
return brcmnand_edu_irq(irq, data);
}
complete(&ctrl->done);
return IRQ_HANDLED;
}
/* Handle SoC-specific interrupt hardware */
static irqreturn_t brcmnand_irq(int irq, void *data)
{
struct brcmnand_controller *ctrl = data;
if (ctrl->soc->ctlrdy_ack(ctrl->soc))
return brcmnand_ctlrdy_irq(irq, data);
return IRQ_NONE;
}
static irqreturn_t brcmnand_dma_irq(int irq, void *data)
{
struct brcmnand_controller *ctrl = data;
complete(&ctrl->dma_done);
return IRQ_HANDLED;
}
static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
{
struct brcmnand_controller *ctrl = host->ctrl;
int ret;
u64 cmd_addr;
cmd_addr = brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);
/*
* If we came here through _panic_write and there is a pending
* command, try to wait for it. If it times out, rather than
* hitting BUG_ON, just return so we don't crash while crashing.
*/
if (oops_in_progress) {
if (ctrl->cmd_pending &&
bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
return;
} else
BUG_ON(ctrl->cmd_pending != 0);
ctrl->cmd_pending = cmd;
ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
WARN_ON(ret);
mb(); /* flush previous writes */
brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
cmd << brcmnand_cmd_shift(ctrl));
}
/***********************************************************************
* NAND MTD API: read/program/erase
***********************************************************************/
static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat,
unsigned int ctrl)
{
/* intentionally left blank */
}
static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
struct mtd_info *mtd = nand_to_mtd(chip);
bool err = false;
int sts;
if (mtd->oops_panic_write || ctrl->irq < 0) {
/* switch to interrupt polling and PIO mode */
disable_ctrl_irqs(ctrl);
sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
NAND_CTRL_RDY, 0);
err = sts < 0;
} else {
unsigned long timeo = msecs_to_jiffies(
NAND_POLL_STATUS_TIMEOUT_MS);
/* wait for completion interrupt */
sts = wait_for_completion_timeout(&ctrl->done, timeo);
err = !sts;
}
return err;
}
static int brcmnand_waitfunc(struct nand_chip *chip)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
bool err = false;
dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
if (ctrl->cmd_pending)
err = brcmstb_nand_wait_for_completion(chip);
ctrl->cmd_pending = 0;
if (err) {
u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
>> brcmnand_cmd_shift(ctrl);
dev_err_ratelimited(ctrl->dev,
"timeout waiting for command %#02x\n", cmd);
dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
return -ETIMEDOUT;
}
return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
INTFC_FLASH_STATUS;
}
enum {
LLOP_RE = BIT(16),
LLOP_WE = BIT(17),
LLOP_ALE = BIT(18),
LLOP_CLE = BIT(19),
LLOP_RETURN_IDLE = BIT(31),
LLOP_DATA_MASK = GENMASK(15, 0),
};
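/*
 * Issue a single raw NAND bus cycle (command, address, or data) via the
 * LL_OP register. For example, GET/SET_FEATURES is built from an LL_OP_CMD
 * plus LL_OP_ADDR cycle in brcmnand_cmdfunc(), followed by LL_OP_RD cycles
 * in brcmnand_read_byte() or LL_OP_WR cycles in brcmnand_write_buf().
 */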
static int brcmnand_low_level_op(struct brcmnand_host *host,
enum brcmnand_llop_type type, u32 data,
bool last_op)
{
struct nand_chip *chip = &host->chip;
struct brcmnand_controller *ctrl = host->ctrl;
u32 tmp;
tmp = data & LLOP_DATA_MASK;
switch (type) {
case LL_OP_CMD:
tmp |= LLOP_WE | LLOP_CLE;
break;
case LL_OP_ADDR:
/* WE | ALE */
tmp |= LLOP_WE | LLOP_ALE;
break;
case LL_OP_WR:
/* WE */
tmp |= LLOP_WE;
break;
case LL_OP_RD:
/* RE */
tmp |= LLOP_RE;
break;
}
if (last_op)
/* RETURN_IDLE */
tmp |= LLOP_RETURN_IDLE;
dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp);
brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp);
(void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);
brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP);
return brcmnand_waitfunc(chip);
}
static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command,
int column, int page_addr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
u64 addr = (u64)page_addr << chip->page_shift;
int native_cmd = 0;
if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
command == NAND_CMD_RNDOUT)
addr = (u64)column;
/* Avoid propagating a negative, don't-care address */
else if (page_addr < 0)
addr = 0;
dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
(unsigned long long)addr);
host->last_cmd = command;
host->last_byte = 0;
host->last_addr = addr;
switch (command) {
case NAND_CMD_RESET:
native_cmd = CMD_FLASH_RESET;
break;
case NAND_CMD_STATUS:
native_cmd = CMD_STATUS_READ;
break;
case NAND_CMD_READID:
native_cmd = CMD_DEVICE_ID_READ;
break;
case NAND_CMD_READOOB:
native_cmd = CMD_SPARE_AREA_READ;
break;
case NAND_CMD_ERASE1:
native_cmd = CMD_BLOCK_ERASE;
brcmnand_wp(mtd, 0);
break;
case NAND_CMD_PARAM:
native_cmd = CMD_PARAMETER_READ;
break;
case NAND_CMD_SET_FEATURES:
case NAND_CMD_GET_FEATURES:
brcmnand_low_level_op(host, LL_OP_CMD, command, false);
brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
break;
case NAND_CMD_RNDOUT:
native_cmd = CMD_PARAMETER_CHANGE_COL;
addr &= ~((u64)(FC_BYTES - 1));
/*
* HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
* NB: hwcfg.sector_size_1k may not be initialized yet
*/
if (brcmnand_get_sector_size_1k(host)) {
host->hwcfg.sector_size_1k =
brcmnand_get_sector_size_1k(host);
brcmnand_set_sector_size_1k(host, 0);
}
break;
}
if (!native_cmd)
return;
brcmnand_set_cmd_addr(mtd, addr);
brcmnand_send_cmd(host, native_cmd);
brcmnand_waitfunc(chip);
if (native_cmd == CMD_PARAMETER_READ ||
native_cmd == CMD_PARAMETER_CHANGE_COL) {
/* Copy flash cache word-wise */
u32 *flash_cache = (u32 *)ctrl->flash_cache;
int i;
brcmnand_soc_data_bus_prepare(ctrl->soc, true);
/*
* Must cache the FLASH_CACHE now, since changes in
* SECTOR_SIZE_1K may invalidate it
*/
for (i = 0; i < FC_WORDS; i++)
/*
* Flash cache is big endian for parameter pages, at
* least on STB SoCs
*/
flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));
brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
/* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
if (host->hwcfg.sector_size_1k)
brcmnand_set_sector_size_1k(host,
host->hwcfg.sector_size_1k);
}
	/* Re-enabling protection is necessary only after an erase */
if (command == NAND_CMD_ERASE1)
brcmnand_wp(mtd, 1);
}
static uint8_t brcmnand_read_byte(struct nand_chip *chip)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
uint8_t ret = 0;
int addr, offs;
switch (host->last_cmd) {
case NAND_CMD_READID:
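		/*
		 * The 8-byte ID is returned MSB-first from the 32-bit ID and
		 * ID_EXT registers.
		 */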
if (host->last_byte < 4)
ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
(24 - (host->last_byte << 3));
else if (host->last_byte < 8)
ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
(56 - (host->last_byte << 3));
break;
case NAND_CMD_READOOB:
ret = oob_reg_read(ctrl, host->last_byte);
break;
case NAND_CMD_STATUS:
ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
INTFC_FLASH_STATUS;
if (wp_on) /* hide WP status */
ret |= NAND_STATUS_WP;
break;
case NAND_CMD_PARAM:
case NAND_CMD_RNDOUT:
addr = host->last_addr + host->last_byte;
offs = addr & (FC_BYTES - 1);
/* At FC_BYTES boundary, switch to next column */
if (host->last_byte > 0 && offs == 0)
nand_change_read_column_op(chip, addr, NULL, 0, false);
ret = ctrl->flash_cache[offs];
break;
case NAND_CMD_GET_FEATURES:
if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
ret = 0;
} else {
bool last = host->last_byte ==
ONFI_SUBFEATURE_PARAM_LEN - 1;
brcmnand_low_level_op(host, LL_OP_RD, 0, last);
ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
}
}
dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
host->last_byte++;
return ret;
}
static void brcmnand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
int i;
for (i = 0; i < len; i++, buf++)
*buf = brcmnand_read_byte(chip);
}
static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf,
int len)
{
int i;
struct brcmnand_host *host = nand_get_controller_data(chip);
switch (host->last_cmd) {
case NAND_CMD_SET_FEATURES:
for (i = 0; i < len; i++)
brcmnand_low_level_op(host, LL_OP_WR, buf[i],
(i + 1) == len);
break;
default:
BUG();
break;
}
}
/*
* Kick EDU engine
*/
static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
u8 *oob, u32 len, u8 cmd)
{
struct brcmnand_controller *ctrl = host->ctrl;
struct brcmnand_cfg *cfg = &host->hwcfg;
unsigned long timeo = msecs_to_jiffies(200);
int ret = 0;
int dir = (cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
u8 edu_cmd = (cmd == CMD_PAGE_READ ? EDU_CMD_READ : EDU_CMD_WRITE);
unsigned int trans = len >> FC_SHIFT;
dma_addr_t pa;
dev_dbg(ctrl->dev, "EDU %s %p:%p\n", ((edu_cmd == EDU_CMD_READ) ?
"read" : "write"), buf, oob);
pa = dma_map_single(ctrl->dev, buf, len, dir);
if (dma_mapping_error(ctrl->dev, pa)) {
dev_err(ctrl->dev, "unable to map buffer for EDU DMA\n");
return -ENOMEM;
}
ctrl->edu_pending = true;
ctrl->edu_dram_addr = pa;
ctrl->edu_ext_addr = addr;
ctrl->edu_cmd = edu_cmd;
ctrl->edu_count = trans;
ctrl->sas = cfg->spare_area_size;
ctrl->oob = oob;
edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr);
edu_readl(ctrl, EDU_DRAM_ADDR);
edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr);
edu_readl(ctrl, EDU_EXT_ADDR);
edu_writel(ctrl, EDU_LENGTH, FC_BYTES);
edu_readl(ctrl, EDU_LENGTH);
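	/*
	 * The EDU moves one FC_BYTES chunk per command; brcmnand_edu_irq()
	 * re-arms it for each remaining chunk counted in edu_count.
	 */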
if (ctrl->oob && (ctrl->edu_cmd == EDU_CMD_WRITE)) {
brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
ctrl->edu_ext_addr);
brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
ctrl->oob += write_oob_to_regs(ctrl,
1,
ctrl->oob, ctrl->sas,
ctrl->sector_size_1k);
}
/* Start edu engine */
mb(); /* flush previous writes */
edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd);
edu_readl(ctrl, EDU_CMD);
if (wait_for_completion_timeout(&ctrl->edu_done, timeo) <= 0) {
dev_err(ctrl->dev,
"timeout waiting for EDU; status %#x, error status %#x\n",
edu_readl(ctrl, EDU_STATUS),
edu_readl(ctrl, EDU_ERR_STATUS));
}
dma_unmap_single(ctrl->dev, pa, len, dir);
/* read last subpage oob */
if (ctrl->oob && (ctrl->edu_cmd == EDU_CMD_READ)) {
ctrl->oob += read_oob_from_regs(ctrl,
1,
ctrl->oob, ctrl->sas,
ctrl->sector_size_1k);
}
/* for program page check NAND status */
if (((brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
INTFC_FLASH_STATUS) & NAND_STATUS_FAIL) &&
edu_cmd == EDU_CMD_WRITE) {
dev_info(ctrl->dev, "program failed at %llx\n",
(unsigned long long)addr);
ret = -EIO;
}
/* Make sure the EDU status is clean */
if (edu_readl(ctrl, EDU_STATUS) & EDU_STATUS_ACTIVE)
dev_warn(ctrl->dev, "EDU still active: %#x\n",
edu_readl(ctrl, EDU_STATUS));
if (unlikely(edu_readl(ctrl, EDU_ERR_STATUS) & EDU_ERR_STATUS_ERRACK)) {
dev_warn(ctrl->dev, "EDU RBUS error at addr %llx\n",
(unsigned long long)addr);
ret = -EIO;
}
ctrl->edu_pending = false;
brcmnand_edu_init(ctrl);
edu_writel(ctrl, EDU_STOP, 0); /* force stop */
edu_readl(ctrl, EDU_STOP);
if (!ret && edu_cmd == EDU_CMD_READ) {
u64 err_addr = 0;
/*
* check for ECC errors here, subpage ECC errors are
* retained in ECC error address register
*/
err_addr = brcmnand_get_uncorrecc_addr(ctrl);
if (!err_addr) {
err_addr = brcmnand_get_correcc_addr(ctrl);
if (err_addr)
ret = -EUCLEAN;
} else
ret = -EBADMSG;
}
return ret;
}
/*
* Construct a FLASH_DMA descriptor as part of a linked list. You must know the
* following ahead of time:
* - Is this descriptor the beginning or end of a linked list?
* - What is the (DMA) address of the next descriptor in the linked list?
*/
static int brcmnand_fill_dma_desc(struct brcmnand_host *host,
struct brcm_nand_dma_desc *desc, u64 addr,
dma_addr_t buf, u32 len, u8 dma_cmd,
bool begin, bool end,
dma_addr_t next_desc)
{
memset(desc, 0, sizeof(*desc));
/* Descriptors are written in native byte order (wordwise) */
desc->next_desc = lower_32_bits(next_desc);
desc->next_desc_ext = upper_32_bits(next_desc);
desc->cmd_irq = (dma_cmd << 24) |
(end ? (0x03 << 8) : 0) | /* IRQ | STOP */
(!!begin) | ((!!end) << 1); /* head, tail */
#ifdef CONFIG_CPU_BIG_ENDIAN
desc->cmd_irq |= 0x01 << 12;
#endif
desc->dram_addr = lower_32_bits(buf);
desc->dram_addr_ext = upper_32_bits(buf);
desc->tfr_len = len;
desc->total_len = len;
desc->flash_addr = lower_32_bits(addr);
desc->flash_addr_ext = upper_32_bits(addr);
desc->cs = host->cs;
desc->status_valid = 0x01;
return 0;
}
/*
* Kick the FLASH_DMA engine, with a given DMA descriptor
*/
static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
{
struct brcmnand_controller *ctrl = host->ctrl;
unsigned long timeo = msecs_to_jiffies(100);
flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
if (ctrl->nand_version > 0x0602) {
flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT,
upper_32_bits(desc));
(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
}
/* Start FLASH_DMA engine */
ctrl->dma_pending = true;
mb(); /* flush previous writes */
flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */
if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
dev_err(ctrl->dev,
"timeout waiting for DMA; status %#x, error status %#x\n",
flash_dma_readl(ctrl, FLASH_DMA_STATUS),
flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
}
ctrl->dma_pending = false;
flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
}
static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
u8 *oob, u32 len, u8 dma_cmd)
{
struct brcmnand_controller *ctrl = host->ctrl;
dma_addr_t buf_pa;
int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
if (dma_mapping_error(ctrl->dev, buf_pa)) {
dev_err(ctrl->dev, "unable to map buffer for DMA\n");
return -ENOMEM;
}
brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
dma_cmd, true, true, 0);
brcmnand_dma_run(host, ctrl->dma_pa);
dma_unmap_single(ctrl->dev, buf_pa, len, dir);
if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
return -EBADMSG;
else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
return -EUCLEAN;
return 0;
}
/*
* Assumes proper CS is already set
*/
static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
u64 addr, unsigned int trans, u32 *buf,
u8 *oob, u64 *err_addr)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
int i, j, ret = 0;
brcmnand_clear_ecc_addr(ctrl);
for (i = 0; i < trans; i++, addr += FC_BYTES) {
brcmnand_set_cmd_addr(mtd, addr);
/* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
brcmnand_send_cmd(host, CMD_PAGE_READ);
brcmnand_waitfunc(chip);
if (likely(buf)) {
brcmnand_soc_data_bus_prepare(ctrl->soc, false);
for (j = 0; j < FC_WORDS; j++, buf++)
*buf = brcmnand_read_fc(ctrl, j);
brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
}
if (oob)
oob += read_oob_from_regs(ctrl, i, oob,
mtd->oobsize / trans,
host->hwcfg.sector_size_1k);
if (ret != -EBADMSG) {
*err_addr = brcmnand_get_uncorrecc_addr(ctrl);
if (*err_addr)
ret = -EBADMSG;
}
if (!ret) {
*err_addr = brcmnand_get_correcc_addr(ctrl);
if (*err_addr)
ret = -EUCLEAN;
}
}
return ret;
}
/*
* Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC
* error
*
 * Because the HW ECC signals an ECC error if an erased page has even a single
* bitflip, we must check each ECC error to see if it is actually an erased
* page with bitflips, not a truly corrupted page.
*
* On a real error, return a negative error code (-EBADMSG for ECC error), and
* buf will contain raw data.
 * Otherwise, buf is filled with 0xffs and the maximum number of
 * bitflips-per-ECC-sector is returned to the caller.
*
*/
static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
struct nand_chip *chip, void *buf, u64 addr)
{
struct mtd_oob_region ecc;
int i;
int bitflips = 0;
int page = addr >> chip->page_shift;
int ret;
void *ecc_bytes;
void *ecc_chunk;
if (!buf)
buf = nand_get_data_buf(chip);
/* read without ecc for verification */
ret = chip->ecc.read_page_raw(chip, buf, true, page);
if (ret)
return ret;
for (i = 0; i < chip->ecc.steps; i++) {
ecc_chunk = buf + chip->ecc.size * i;
mtd_ooblayout_ecc(mtd, i, &ecc);
ecc_bytes = chip->oob_poi + ecc.offset;
ret = nand_check_erased_ecc_chunk(ecc_chunk, chip->ecc.size,
ecc_bytes, ecc.length,
NULL, 0,
chip->ecc.strength);
if (ret < 0)
return ret;
bitflips = max(bitflips, ret);
}
return bitflips;
}
static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
u64 addr, unsigned int trans, u32 *buf, u8 *oob)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
u64 err_addr = 0;
int err;
bool retry = true;
bool edu_err = false;
dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
try_dmaread:
brcmnand_clear_ecc_addr(ctrl);
if (ctrl->dma_trans && (has_edu(ctrl) || !oob) &&
flash_dma_buf_ok(buf)) {
err = ctrl->dma_trans(host, addr, buf, oob,
trans * FC_BYTES,
CMD_PAGE_READ);
if (err) {
if (mtd_is_bitflip_or_eccerr(err))
err_addr = addr;
else
return -EIO;
}
if (has_edu(ctrl) && err_addr)
edu_err = true;
} else {
if (oob)
memset(oob, 0x99, mtd->oobsize);
err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
oob, &err_addr);
}
if (mtd_is_eccerr(err)) {
		/*
		 * On controller versions 7.0 and 7.1, a DMA read issued after
		 * a PIO read that reported an uncorrectable error captures
		 * that stale error, which is only cleared on a subsequent DMA
		 * read. Retry once to clear a possibly false error reported
		 * for the current DMA read.
		 */
if ((ctrl->nand_version == 0x0700) ||
(ctrl->nand_version == 0x0701)) {
if (retry) {
retry = false;
goto try_dmaread;
}
}
		/*
		 * Controller version 7.2 has a HW encoder to detect erased-page
		 * bitflips; apply SW verification for older controllers only.
		 */
if (ctrl->nand_version < 0x0702) {
err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
addr);
/* erased page bitflips corrected */
if (err >= 0)
return err;
}
dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
(unsigned long long)err_addr);
mtd->ecc_stats.failed++;
/* NAND layer expects zero on ECC errors */
return 0;
}
if (mtd_is_bitflip(err)) {
unsigned int corrected = brcmnand_count_corrected(ctrl);
/* in case of EDU correctable error we read again using PIO */
if (edu_err)
err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
oob, &err_addr);
dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
(unsigned long long)err_addr);
mtd->ecc_stats.corrected += corrected;
/* Always exceed the software-imposed threshold */
return max(mtd->bitflip_threshold, corrected);
}
return 0;
}
static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
nand_read_page_op(chip, page, 0, NULL, 0);
return brcmnand_read(mtd, chip, host->last_addr,
mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
}
static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
int ret;
nand_read_page_op(chip, page, 0, NULL, 0);
brcmnand_set_ecc_enabled(host, 0);
ret = brcmnand_read(mtd, chip, host->last_addr,
mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
return ret;
}
static int brcmnand_read_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
return brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
mtd->writesize >> FC_SHIFT,
NULL, (u8 *)chip->oob_poi);
}
static int brcmnand_read_oob_raw(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
brcmnand_set_ecc_enabled(host, 0);
brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
mtd->writesize >> FC_SHIFT,
NULL, (u8 *)chip->oob_poi);
brcmnand_set_ecc_enabled(host, 1);
return 0;
}
static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
u64 addr, const u32 *buf, u8 *oob)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
unsigned int i, j, trans = mtd->writesize >> FC_SHIFT;
int status, ret = 0;
dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);
if (unlikely((unsigned long)buf & 0x03)) {
dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
buf = (u32 *)((unsigned long)buf & ~0x03);
}
brcmnand_wp(mtd, 0);
for (i = 0; i < ctrl->max_oob; i += 4)
oob_reg_write(ctrl, i, 0xffffffff);
if (mtd->oops_panic_write)
/* switch to interrupt polling and PIO mode */
disable_ctrl_irqs(ctrl);
if (use_dma(ctrl) && (has_edu(ctrl) || !oob) && flash_dma_buf_ok(buf)) {
if (ctrl->dma_trans(host, addr, (u32 *)buf, oob, mtd->writesize,
CMD_PROGRAM_PAGE))
ret = -EIO;
goto out;
}
for (i = 0; i < trans; i++, addr += FC_BYTES) {
/* full address MUST be set before populating FC */
brcmnand_set_cmd_addr(mtd, addr);
if (buf) {
brcmnand_soc_data_bus_prepare(ctrl->soc, false);
for (j = 0; j < FC_WORDS; j++, buf++)
brcmnand_write_fc(ctrl, j, *buf);
brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
} else if (oob) {
for (j = 0; j < FC_WORDS; j++)
brcmnand_write_fc(ctrl, j, 0xffffffff);
}
if (oob) {
oob += write_oob_to_regs(ctrl, i, oob,
mtd->oobsize / trans,
host->hwcfg.sector_size_1k);
}
/* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
status = brcmnand_waitfunc(chip);
if (status & NAND_STATUS_FAIL) {
dev_info(ctrl->dev, "program failed at %llx\n",
(unsigned long long)addr);
ret = -EIO;
goto out;
}
}
out:
brcmnand_wp(mtd, 1);
return ret;
}
static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
return nand_prog_page_end_op(chip);
}
static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
brcmnand_set_ecc_enabled(host, 0);
brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
return nand_prog_page_end_op(chip);
}
static int brcmnand_write_oob(struct nand_chip *chip, int page)
{
return brcmnand_write(nand_to_mtd(chip), chip,
(u64)page << chip->page_shift, NULL,
chip->oob_poi);
}
static int brcmnand_write_oob_raw(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
int ret;
brcmnand_set_ecc_enabled(host, 0);
ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL,
(u8 *)chip->oob_poi);
brcmnand_set_ecc_enabled(host, 1);
return ret;
}
/***********************************************************************
* Per-CS setup (1 NAND device)
***********************************************************************/
static int brcmnand_set_cfg(struct brcmnand_host *host,
struct brcmnand_cfg *cfg)
{
struct brcmnand_controller *ctrl = host->ctrl;
struct nand_chip *chip = &host->chip;
u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
BRCMNAND_CS_CFG_EXT);
u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
BRCMNAND_CS_ACC_CONTROL);
u8 block_size = 0, page_size = 0, device_size = 0;
u32 tmp;
if (ctrl->block_sizes) {
int i, found;
for (i = 0, found = 0; ctrl->block_sizes[i]; i++)
if (ctrl->block_sizes[i] * 1024 == cfg->block_size) {
block_size = i;
found = 1;
}
if (!found) {
dev_warn(ctrl->dev, "invalid block size %u\n",
cfg->block_size);
return -EINVAL;
}
} else {
block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE);
}
if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size &&
cfg->block_size > ctrl->max_block_size)) {
dev_warn(ctrl->dev, "invalid block size %u\n",
cfg->block_size);
block_size = 0;
}
if (ctrl->page_sizes) {
int i, found;
for (i = 0, found = 0; ctrl->page_sizes[i]; i++)
if (ctrl->page_sizes[i] == cfg->page_size) {
page_size = i;
found = 1;
}
if (!found) {
dev_warn(ctrl->dev, "invalid page size %u\n",
cfg->page_size);
return -EINVAL;
}
} else {
page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE);
}
if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size &&
cfg->page_size > ctrl->max_page_size)) {
dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size);
return -EINVAL;
}
if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) {
dev_warn(ctrl->dev, "invalid device size 0x%llx\n",
(unsigned long long)cfg->device_size);
return -EINVAL;
}
device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE);
tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) |
(cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) |
(cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) |
(!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) |
(device_size << CFG_DEVICE_SIZE_SHIFT);
if (cfg_offs == cfg_ext_offs) {
tmp |= (page_size << ctrl->page_size_shift) |
(block_size << CFG_BLK_SIZE_SHIFT);
nand_writereg(ctrl, cfg_offs, tmp);
} else {
nand_writereg(ctrl, cfg_offs, tmp);
tmp = (page_size << CFG_EXT_PAGE_SIZE_SHIFT) |
(block_size << CFG_EXT_BLK_SIZE_SHIFT);
nand_writereg(ctrl, cfg_ext_offs, tmp);
}
tmp = nand_readreg(ctrl, acc_control_offs);
tmp &= ~brcmnand_ecc_level_mask(ctrl);
tmp &= ~brcmnand_spare_area_mask(ctrl);
if (ctrl->nand_version >= 0x0302) {
tmp |= cfg->ecc_level << ctrl->ecc_level_shift;
tmp |= cfg->spare_area_size;
}
nand_writereg(ctrl, acc_control_offs, tmp);
brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);
/* threshold = ceil(BCH-level * 0.75) */
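	/* e.g. BCH-4 -> 3, BCH-8 -> 6, BCH-12 -> 9 */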
brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4));
return 0;
}
static void brcmnand_print_cfg(struct brcmnand_host *host,
char *buf, struct brcmnand_cfg *cfg)
{
buf += sprintf(buf,
"%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
(unsigned long long)cfg->device_size >> 20,
cfg->block_size >> 10,
cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size,
cfg->page_size >= 1024 ? "KiB" : "B",
cfg->spare_area_size, cfg->device_width);
/* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
if (is_hamming_ecc(host->ctrl, cfg))
sprintf(buf, ", Hamming ECC");
else if (cfg->sector_size_1k)
sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
else
sprintf(buf, ", BCH-%u", cfg->ecc_level);
}
/*
* Minimum number of bytes to address a page. Calculated as:
* roundup(log2(size / page-size) / 8)
*
* NB: the following does not "round up" for non-power-of-2 'size'; but this is
* OK because many other things will break if 'size' is irregular...
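 *
 * For example, a 1GiB device with 2KiB pages needs ilog2(2^30) - ilog2(2^11)
 * = 19 bits to address any page, which rounds up to 3 bytes.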
*/
static inline int get_blk_adr_bytes(u64 size, u32 writesize)
{
return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3;
}
static int brcmnand_setup_dev(struct brcmnand_host *host)
{
struct mtd_info *mtd = nand_to_mtd(&host->chip);
struct nand_chip *chip = &host->chip;
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(&chip->base);
struct nand_memory_organization *memorg =
nanddev_get_memorg(&chip->base);
struct brcmnand_controller *ctrl = host->ctrl;
struct brcmnand_cfg *cfg = &host->hwcfg;
char msg[128];
u32 offs, tmp, oob_sector;
int ret;
memset(cfg, 0, sizeof(*cfg));
ret = of_property_read_u32(nand_get_flash_node(chip),
"brcm,nand-oob-sector-size",
&oob_sector);
if (ret) {
/* Use detected size */
cfg->spare_area_size = mtd->oobsize /
(mtd->writesize >> FC_SHIFT);
} else {
cfg->spare_area_size = oob_sector;
}
if (cfg->spare_area_size > ctrl->max_oob)
cfg->spare_area_size = ctrl->max_oob;
/*
* Set mtd and memorg oobsize to be consistent with controller's
* spare_area_size, as the rest is inaccessible.
*/
mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
memorg->oobsize = mtd->oobsize;
cfg->device_size = mtd->size;
cfg->block_size = mtd->erasesize;
cfg->page_size = mtd->writesize;
cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8;
cfg->col_adr_bytes = 2;
cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
chip->ecc.engine_type);
return -EINVAL;
}
if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
if (chip->ecc.strength == 1 && chip->ecc.size == 512)
/* Default to Hamming for 1-bit ECC, if unspecified */
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
else
/* Otherwise, BCH */
chip->ecc.algo = NAND_ECC_ALGO_BCH;
}
if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING &&
(chip->ecc.strength != 1 || chip->ecc.size != 512)) {
dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
chip->ecc.strength, chip->ecc.size);
return -EINVAL;
}
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
(!chip->ecc.size || !chip->ecc.strength)) {
if (requirements->step_size && requirements->strength) {
/* use detected ECC parameters */
chip->ecc.size = requirements->step_size;
chip->ecc.strength = requirements->strength;
dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n",
chip->ecc.size, chip->ecc.strength);
}
}
switch (chip->ecc.size) {
case 512:
if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
cfg->ecc_level = 15;
else
cfg->ecc_level = chip->ecc.strength;
cfg->sector_size_1k = 0;
break;
case 1024:
if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
dev_err(ctrl->dev, "1KB sectors not supported\n");
return -EINVAL;
}
if (chip->ecc.strength & 0x1) {
dev_err(ctrl->dev,
"odd ECC not supported with 1KB sectors\n");
return -EINVAL;
}
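		/*
		 * The stored ECC level is half the per-1KiB strength (see
		 * brcmnand_print_cfg(), which reports it as ecc_level * 2).
		 */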
cfg->ecc_level = chip->ecc.strength >> 1;
cfg->sector_size_1k = 1;
break;
default:
dev_err(ctrl->dev, "unsupported ECC size: %d\n",
chip->ecc.size);
return -EINVAL;
}
cfg->ful_adr_bytes = cfg->blk_adr_bytes;
if (mtd->writesize > 512)
cfg->ful_adr_bytes += cfg->col_adr_bytes;
else
cfg->ful_adr_bytes += 1;
ret = brcmnand_set_cfg(host, cfg);
if (ret)
return ret;
brcmnand_set_ecc_enabled(host, 1);
brcmnand_print_cfg(host, msg, cfg);
dev_info(ctrl->dev, "detected %s\n", msg);
/* Configure ACC_CONTROL */
offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
tmp = nand_readreg(ctrl, offs);
tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
tmp &= ~ACC_CONTROL_RD_ERASED;
	/* We need to turn on reads from erased pages protected by ECC */
if (ctrl->nand_version >= 0x0702)
tmp |= ACC_CONTROL_RD_ERASED;
tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
if (ctrl->features & BRCMNAND_HAS_PREFETCH)
tmp &= ~ACC_CONTROL_PREFETCH;
nand_writereg(ctrl, offs, tmp);
return 0;
}
static int brcmnand_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
int ret;
chip->options |= NAND_NO_SUBPAGE_WRITE;
/*
* Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
* to/from, and have nand_base pass us a bounce buffer instead, as
* needed.
*/
chip->options |= NAND_USES_DMA;
if (chip->bbt_options & NAND_BBT_USE_FLASH)
chip->bbt_options |= NAND_BBT_NO_OOB;
if (brcmnand_setup_dev(host))
return -ENXIO;
chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
/* only use our internal HW threshold */
mtd->bitflip_threshold = 1;
ret = brcmstb_choose_ecc_layout(host);
/* If OOB is written with ECC enabled it will cause ECC errors */
if (is_hamming_ecc(host->ctrl, &host->hwcfg)) {
chip->ecc.write_oob = brcmnand_write_oob_raw;
chip->ecc.read_oob = brcmnand_read_oob_raw;
}
return ret;
}
static const struct nand_controller_ops brcmnand_controller_ops = {
.attach_chip = brcmnand_attach_chip,
};
static int brcmnand_init_cs(struct brcmnand_host *host,
const char * const *part_probe_types)
{
struct brcmnand_controller *ctrl = host->ctrl;
struct device *dev = ctrl->dev;
struct mtd_info *mtd;
struct nand_chip *chip;
int ret;
u16 cfg_offs;
mtd = nand_to_mtd(&host->chip);
chip = &host->chip;
nand_set_controller_data(chip, host);
mtd->name = devm_kasprintf(dev, GFP_KERNEL, "brcmnand.%d",
host->cs);
if (!mtd->name)
return -ENOMEM;
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl;
chip->legacy.cmdfunc = brcmnand_cmdfunc;
chip->legacy.waitfunc = brcmnand_waitfunc;
chip->legacy.read_byte = brcmnand_read_byte;
chip->legacy.read_buf = brcmnand_read_buf;
chip->legacy.write_buf = brcmnand_write_buf;
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.read_page = brcmnand_read_page;
chip->ecc.write_page = brcmnand_write_page;
chip->ecc.read_page_raw = brcmnand_read_page_raw;
chip->ecc.write_page_raw = brcmnand_write_page_raw;
chip->ecc.write_oob_raw = brcmnand_write_oob_raw;
chip->ecc.read_oob_raw = brcmnand_read_oob_raw;
chip->ecc.read_oob = brcmnand_read_oob;
chip->ecc.write_oob = brcmnand_write_oob;
chip->controller = &ctrl->controller;
	/*
	 * The bootloader might have configured 16-bit mode but the
	 * NAND READID command only works in 8-bit mode. We force
	 * 8-bit mode here to ensure that NAND READID commands work.
	 */
cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
nand_writereg(ctrl, cfg_offs,
nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
ret = nand_scan(chip, 1);
if (ret)
return ret;
ret = mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
if (ret)
nand_cleanup(chip);
return ret;
}
static void brcmnand_save_restore_cs_config(struct brcmnand_host *host,
int restore)
{
struct brcmnand_controller *ctrl = host->ctrl;
u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
BRCMNAND_CS_CFG_EXT);
u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
BRCMNAND_CS_ACC_CONTROL);
u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);
if (restore) {
nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
if (cfg_offs != cfg_ext_offs)
nand_writereg(ctrl, cfg_ext_offs,
host->hwcfg.config_ext);
nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
} else {
host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
if (cfg_offs != cfg_ext_offs)
host->hwcfg.config_ext =
nand_readreg(ctrl, cfg_ext_offs);
host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
}
}
static int brcmnand_suspend(struct device *dev)
{
struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
struct brcmnand_host *host;
list_for_each_entry(host, &ctrl->host_list, node)
brcmnand_save_restore_cs_config(host, 0);
ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
ctrl->corr_stat_threshold =
brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);
if (has_flash_dma(ctrl))
ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
else if (has_edu(ctrl))
ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
return 0;
}
static int brcmnand_resume(struct device *dev)
{
struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
struct brcmnand_host *host;
if (has_flash_dma(ctrl)) {
flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
}
if (has_edu(ctrl)) {
ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
edu_writel(ctrl, EDU_CONFIG, ctrl->edu_config);
edu_readl(ctrl, EDU_CONFIG);
brcmnand_edu_init(ctrl);
}
brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
ctrl->corr_stat_threshold);
if (ctrl->soc) {
/* Clear/re-enable interrupt */
ctrl->soc->ctlrdy_ack(ctrl->soc);
ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
}
list_for_each_entry(host, &ctrl->host_list, node) {
struct nand_chip *chip = &host->chip;
brcmnand_save_restore_cs_config(host, 1);
/* Reset the chip, required by some chips after power-up */
nand_reset_op(chip);
}
return 0;
}
const struct dev_pm_ops brcmnand_pm_ops = {
.suspend = brcmnand_suspend,
.resume = brcmnand_resume,
};
EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
static const struct of_device_id __maybe_unused brcmnand_of_match[] = {
{ .compatible = "brcm,brcmnand-v2.1" },
{ .compatible = "brcm,brcmnand-v2.2" },
{ .compatible = "brcm,brcmnand-v4.0" },
{ .compatible = "brcm,brcmnand-v5.0" },
{ .compatible = "brcm,brcmnand-v6.0" },
{ .compatible = "brcm,brcmnand-v6.1" },
{ .compatible = "brcm,brcmnand-v6.2" },
{ .compatible = "brcm,brcmnand-v7.0" },
{ .compatible = "brcm,brcmnand-v7.1" },
{ .compatible = "brcm,brcmnand-v7.2" },
{ .compatible = "brcm,brcmnand-v7.3" },
{},
};
MODULE_DEVICE_TABLE(of, brcmnand_of_match);
/***********************************************************************
* Platform driver setup (per controller)
***********************************************************************/
static int brcmnand_edu_setup(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
struct resource *res;
int ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-edu");
if (res) {
ctrl->edu_base = devm_ioremap_resource(dev, res);
if (IS_ERR(ctrl->edu_base))
return PTR_ERR(ctrl->edu_base);
ctrl->edu_offsets = edu_regs;
edu_writel(ctrl, EDU_CONFIG, EDU_CONFIG_MODE_NAND |
EDU_CONFIG_SWAP_CFG);
edu_readl(ctrl, EDU_CONFIG);
/* initialize edu */
brcmnand_edu_init(ctrl);
ctrl->edu_irq = platform_get_irq_optional(pdev, 1);
if (ctrl->edu_irq < 0) {
dev_warn(dev,
"FLASH EDU enabled, using ctlrdy irq\n");
} else {
ret = devm_request_irq(dev, ctrl->edu_irq,
brcmnand_edu_irq, 0,
"brcmnand-edu", ctrl);
if (ret < 0) {
dev_err(ctrl->dev, "can't allocate IRQ %d: error %d\n",
ctrl->edu_irq, ret);
return ret;
}
dev_info(dev, "FLASH EDU enabled using irq %u\n",
ctrl->edu_irq);
}
}
return 0;
}
int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
{
struct brcmnand_platform_data *pd = dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node, *child;
struct brcmnand_controller *ctrl;
struct brcmnand_host *host;
struct resource *res;
int ret;
if (dn && !of_match_node(brcmnand_of_match, dn))
return -ENODEV;
ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return -ENOMEM;
dev_set_drvdata(dev, ctrl);
ctrl->dev = dev;
ctrl->soc = soc;
	/*
	 * Enable the static key if the soc provides I/O operations indicating
	 * that a non-memory mapped IO access path must be used
	 */
if (brcmnand_soc_has_ops(ctrl->soc))
static_branch_enable(&brcmnand_soc_has_ops_key);
init_completion(&ctrl->done);
init_completion(&ctrl->dma_done);
init_completion(&ctrl->edu_done);
nand_controller_init(&ctrl->controller);
ctrl->controller.ops = &brcmnand_controller_ops;
INIT_LIST_HEAD(&ctrl->host_list);
/* NAND register range */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctrl->nand_base = devm_ioremap_resource(dev, res);
if (IS_ERR(ctrl->nand_base) && !brcmnand_soc_has_ops(soc))
return PTR_ERR(ctrl->nand_base);
/* Enable clock before using NAND registers */
ctrl->clk = devm_clk_get(dev, "nand");
if (!IS_ERR(ctrl->clk)) {
ret = clk_prepare_enable(ctrl->clk);
if (ret)
return ret;
} else {
ret = PTR_ERR(ctrl->clk);
if (ret == -EPROBE_DEFER)
return ret;
ctrl->clk = NULL;
}
/* Initialize NAND revision */
ret = brcmnand_revision_init(ctrl);
if (ret)
goto err;
/*
* Most chips have this cache at a fixed offset within 'nand' block.
* Some must specify this region separately.
*/
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache");
if (res) {
ctrl->nand_fc = devm_ioremap_resource(dev, res);
if (IS_ERR(ctrl->nand_fc)) {
ret = PTR_ERR(ctrl->nand_fc);
goto err;
}
} else {
ctrl->nand_fc = ctrl->nand_base +
ctrl->reg_offsets[BRCMNAND_FC_BASE];
}
/* FLASH_DMA */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma");
if (res) {
ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
if (IS_ERR(ctrl->flash_dma_base)) {
ret = PTR_ERR(ctrl->flash_dma_base);
goto err;
}
/* initialize the dma version */
brcmnand_flash_dma_revision_init(ctrl);
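		/*
		 * Controllers v7.0+ support 40-bit DMA addressing; otherwise,
		 * or if setting the 40-bit mask fails, fall back to 32-bit.
		 */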
ret = -EIO;
if (ctrl->nand_version >= 0x0700)
ret = dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(40));
if (ret)
ret = dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(32));
if (ret)
goto err;
/* linked-list and stop on error */
flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
/* Allocate descriptor(s) */
ctrl->dma_desc = dmam_alloc_coherent(dev,
sizeof(*ctrl->dma_desc),
&ctrl->dma_pa, GFP_KERNEL);
if (!ctrl->dma_desc) {
ret = -ENOMEM;
goto err;
}
ctrl->dma_irq = platform_get_irq(pdev, 1);
if ((int)ctrl->dma_irq < 0) {
dev_err(dev, "missing FLASH_DMA IRQ\n");
ret = -ENODEV;
goto err;
}
ret = devm_request_irq(dev, ctrl->dma_irq,
brcmnand_dma_irq, 0, DRV_NAME,
ctrl);
if (ret < 0) {
dev_err(dev, "can't allocate IRQ %d: error %d\n",
ctrl->dma_irq, ret);
goto err;
}
dev_info(dev, "enabling FLASH_DMA\n");
/* set flash dma transfer function to call */
ctrl->dma_trans = brcmnand_dma_trans;
} else {
ret = brcmnand_edu_setup(pdev);
if (ret < 0)
goto err;
if (has_edu(ctrl))
/* set edu transfer function to call */
ctrl->dma_trans = brcmnand_edu_trans;
}
/* Disable automatic device ID config, direct addressing */
brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0);
/* Disable XOR addressing */
brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
if (ctrl->features & BRCMNAND_HAS_WP) {
/* Permanently disable write protection */
if (wp_on == 2)
brcmnand_set_wp(ctrl, false);
} else {
wp_on = 0;
}
/* IRQ */
ctrl->irq = platform_get_irq_optional(pdev, 0);
if (ctrl->irq > 0) {
		/*
		 * Some SoCs integrate this controller's interrupt bits in
		 * SoC-specific ways; use the SoC hooks to ack and enable the
		 * ctlrdy interrupt.
		 */
if (soc) {
ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
DRV_NAME, ctrl);
/* Enable interrupt */
ctrl->soc->ctlrdy_ack(ctrl->soc);
ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
} else {
/* Use standard interrupt infrastructure */
ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
DRV_NAME, ctrl);
}
if (ret < 0) {
dev_err(dev, "can't allocate IRQ %d: error %d\n",
ctrl->irq, ret);
goto err;
}
}
for_each_available_child_of_node(dn, child) {
if (of_device_is_compatible(child, "brcm,nandcs")) {
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
of_node_put(child);
ret = -ENOMEM;
goto err;
}
host->pdev = pdev;
host->ctrl = ctrl;
ret = of_property_read_u32(child, "reg", &host->cs);
if (ret) {
dev_err(dev, "can't get chip-select\n");
devm_kfree(dev, host);
continue;
}
nand_set_flash_node(&host->chip, child);
ret = brcmnand_init_cs(host, NULL);
if (ret) {
if (ret == -EPROBE_DEFER) {
of_node_put(child);
goto err;
}
devm_kfree(dev, host);
continue; /* Try all chip-selects */
}
list_add_tail(&host->node, &ctrl->host_list);
}
}
if (!list_empty(&ctrl->host_list))
return 0;
if (!pd) {
ret = -ENODEV;
goto err;
}
	/* If we got here, we must have been probing via platform data */
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
ret = -ENOMEM;
goto err;
}
host->pdev = pdev;
host->ctrl = ctrl;
host->cs = pd->chip_select;
host->chip.ecc.size = pd->ecc_stepsize;
host->chip.ecc.strength = pd->ecc_strength;
ret = brcmnand_init_cs(host, pd->part_probe_types);
if (ret)
goto err;
list_add_tail(&host->node, &ctrl->host_list);
/* No chip-selects could initialize properly */
if (list_empty(&ctrl->host_list)) {
ret = -ENODEV;
goto err;
}
return 0;
err:
clk_disable_unprepare(ctrl->clk);
return ret;
}
EXPORT_SYMBOL_GPL(brcmnand_probe);
int brcmnand_remove(struct platform_device *pdev)
{
struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
struct brcmnand_host *host;
struct nand_chip *chip;
int ret;
list_for_each_entry(host, &ctrl->host_list, node) {
chip = &host->chip;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
}
clk_disable_unprepare(ctrl->clk);
dev_set_drvdata(&pdev->dev, NULL);
return 0;
}
EXPORT_SYMBOL_GPL(brcmnand_remove);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Kevin Cernekee");
MODULE_AUTHOR("Brian Norris");
MODULE_DESCRIPTION("NAND driver for Broadcom chips");
MODULE_ALIAS("platform:brcmnand");
| linux-master | drivers/mtd/nand/raw/brcmnand/brcmnand.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2015 Broadcom Corporation
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "brcmnand.h"
struct bcm63138_nand_soc {
struct brcmnand_soc soc;
void __iomem *base;
};
#define BCM63138_NAND_INT_STATUS 0x00
#define BCM63138_NAND_INT_EN 0x04
enum {
BCM63138_CTLRDY = BIT(4),
};
static bool bcm63138_nand_intc_ack(struct brcmnand_soc *soc)
{
struct bcm63138_nand_soc *priv =
container_of(soc, struct bcm63138_nand_soc, soc);
void __iomem *mmio = priv->base + BCM63138_NAND_INT_STATUS;
u32 val = brcmnand_readl(mmio);
if (val & BCM63138_CTLRDY) {
brcmnand_writel(val & ~BCM63138_CTLRDY, mmio);
return true;
}
return false;
}
static void bcm63138_nand_intc_set(struct brcmnand_soc *soc, bool en)
{
struct bcm63138_nand_soc *priv =
container_of(soc, struct bcm63138_nand_soc, soc);
void __iomem *mmio = priv->base + BCM63138_NAND_INT_EN;
u32 val = brcmnand_readl(mmio);
if (en)
val |= BCM63138_CTLRDY;
else
val &= ~BCM63138_CTLRDY;
brcmnand_writel(val, mmio);
}
static int bcm63138_nand_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bcm63138_nand_soc *priv;
struct brcmnand_soc *soc;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
soc = &priv->soc;
priv->base = devm_platform_ioremap_resource_byname(pdev, "nand-int-base");
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
soc->ctlrdy_ack = bcm63138_nand_intc_ack;
soc->ctlrdy_set_enabled = bcm63138_nand_intc_set;
return brcmnand_probe(pdev, soc);
}
static const struct of_device_id bcm63138_nand_of_match[] = {
{ .compatible = "brcm,nand-bcm63138" },
{},
};
MODULE_DEVICE_TABLE(of, bcm63138_nand_of_match);
static struct platform_driver bcm63138_nand_driver = {
.probe = bcm63138_nand_probe,
.remove = brcmnand_remove,
.driver = {
.name = "bcm63138_nand",
.pm = &brcmnand_pm_ops,
.of_match_table = bcm63138_nand_of_match,
}
};
module_platform_driver(bcm63138_nand_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Brian Norris");
MODULE_DESCRIPTION("NAND driver for BCM63138");
| linux-master | drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015 Simon Arlott
*
* Derived from bcm63138_nand.c:
* Copyright © 2015 Broadcom Corporation
*
* Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/include/bcm963xx/63268_map_part.h:
* Copyright 2000-2010 Broadcom Corporation
*
* Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/flash/nandflash.c:
* Copyright 2000-2010 Broadcom Corporation
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "brcmnand.h"
struct bcm6368_nand_soc {
struct brcmnand_soc soc;
void __iomem *base;
};
#define BCM6368_NAND_INT 0x00
#define BCM6368_NAND_STATUS_SHIFT 0
#define BCM6368_NAND_STATUS_MASK (0xfff << BCM6368_NAND_STATUS_SHIFT)
#define BCM6368_NAND_ENABLE_SHIFT 16
#define BCM6368_NAND_ENABLE_MASK (0xffff << BCM6368_NAND_ENABLE_SHIFT)
#define BCM6368_NAND_BASE_ADDR0 0x04
#define BCM6368_NAND_BASE_ADDR1 0x0c
enum {
BCM6368_NP_READ = BIT(0),
BCM6368_BLOCK_ERASE = BIT(1),
BCM6368_COPY_BACK = BIT(2),
BCM6368_PAGE_PGM = BIT(3),
BCM6368_CTRL_READY = BIT(4),
BCM6368_DEV_RBPIN = BIT(5),
BCM6368_ECC_ERR_UNC = BIT(6),
BCM6368_ECC_ERR_CORR = BIT(7),
};
static bool bcm6368_nand_intc_ack(struct brcmnand_soc *soc)
{
struct bcm6368_nand_soc *priv =
container_of(soc, struct bcm6368_nand_soc, soc);
void __iomem *mmio = priv->base + BCM6368_NAND_INT;
u32 val = brcmnand_readl(mmio);
if (val & (BCM6368_CTRL_READY << BCM6368_NAND_STATUS_SHIFT)) {
/* Ack interrupt */
val &= ~BCM6368_NAND_STATUS_MASK;
val |= BCM6368_CTRL_READY << BCM6368_NAND_STATUS_SHIFT;
brcmnand_writel(val, mmio);
return true;
}
return false;
}
static void bcm6368_nand_intc_set(struct brcmnand_soc *soc, bool en)
{
struct bcm6368_nand_soc *priv =
container_of(soc, struct bcm6368_nand_soc, soc);
void __iomem *mmio = priv->base + BCM6368_NAND_INT;
u32 val = brcmnand_readl(mmio);
/* Don't ack any interrupts */
val &= ~BCM6368_NAND_STATUS_MASK;
if (en)
val |= BCM6368_CTRL_READY << BCM6368_NAND_ENABLE_SHIFT;
else
val &= ~(BCM6368_CTRL_READY << BCM6368_NAND_ENABLE_SHIFT);
brcmnand_writel(val, mmio);
}
static int bcm6368_nand_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bcm6368_nand_soc *priv;
struct brcmnand_soc *soc;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
soc = &priv->soc;
priv->base = devm_platform_ioremap_resource_byname(pdev, "nand-int-base");
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
soc->ctlrdy_ack = bcm6368_nand_intc_ack;
soc->ctlrdy_set_enabled = bcm6368_nand_intc_set;
/* Disable and ack all interrupts */
brcmnand_writel(0, priv->base + BCM6368_NAND_INT);
brcmnand_writel(BCM6368_NAND_STATUS_MASK,
priv->base + BCM6368_NAND_INT);
return brcmnand_probe(pdev, soc);
}
static const struct of_device_id bcm6368_nand_of_match[] = {
{ .compatible = "brcm,nand-bcm6368" },
{},
};
MODULE_DEVICE_TABLE(of, bcm6368_nand_of_match);
static struct platform_driver bcm6368_nand_driver = {
.probe = bcm6368_nand_probe,
.remove = brcmnand_remove,
.driver = {
.name = "bcm6368_nand",
.pm = &brcmnand_pm_ops,
.of_match_table = bcm6368_nand_of_match,
}
};
module_platform_driver(bcm6368_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Simon Arlott");
MODULE_DESCRIPTION("NAND driver for BCM6368");
| linux-master | drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2021 Broadcom
*/
#include <linux/bcma/bcma.h>
#include <linux/bcma/bcma_driver_chipcommon.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "brcmnand.h"
struct brcmnand_bcma_soc {
struct brcmnand_soc soc;
struct bcma_drv_cc *cc;
};
static inline bool brcmnand_bcma_needs_swapping(u32 offset)
{
switch (offset) {
case BCMA_CC_NAND_SPARE_RD0:
case BCMA_CC_NAND_SPARE_RD4:
case BCMA_CC_NAND_SPARE_RD8:
case BCMA_CC_NAND_SPARE_RD12:
case BCMA_CC_NAND_SPARE_WR0:
case BCMA_CC_NAND_SPARE_WR4:
case BCMA_CC_NAND_SPARE_WR8:
case BCMA_CC_NAND_SPARE_WR12:
case BCMA_CC_NAND_DEVID:
case BCMA_CC_NAND_DEVID_X:
case BCMA_CC_NAND_SPARE_RD16:
case BCMA_CC_NAND_SPARE_RD20:
case BCMA_CC_NAND_SPARE_RD24:
case BCMA_CC_NAND_SPARE_RD28:
return true;
}
return false;
}
static inline struct brcmnand_bcma_soc *to_bcma_soc(struct brcmnand_soc *soc)
{
return container_of(soc, struct brcmnand_bcma_soc, soc);
}
static u32 brcmnand_bcma_read_reg(struct brcmnand_soc *soc, u32 offset)
{
struct brcmnand_bcma_soc *sc = to_bcma_soc(soc);
u32 val;
/* Offset into the NAND block and deal with the flash cache separately */
if (offset == BRCMNAND_NON_MMIO_FC_ADDR)
offset = BCMA_CC_NAND_CACHE_DATA;
else
offset += BCMA_CC_NAND_REVISION;
val = bcma_cc_read32(sc->cc, offset);
/* Swap if necessary */
if (brcmnand_bcma_needs_swapping(offset))
val = be32_to_cpu((__force __be32)val);
return val;
}
static void brcmnand_bcma_write_reg(struct brcmnand_soc *soc, u32 val,
u32 offset)
{
struct brcmnand_bcma_soc *sc = to_bcma_soc(soc);
/* Offset into the NAND block */
if (offset == BRCMNAND_NON_MMIO_FC_ADDR)
offset = BCMA_CC_NAND_CACHE_DATA;
else
offset += BCMA_CC_NAND_REVISION;
/* Swap if necessary */
if (brcmnand_bcma_needs_swapping(offset))
val = (__force u32)cpu_to_be32(val);
bcma_cc_write32(sc->cc, offset, val);
}
static struct brcmnand_io_ops brcmnand_bcma_io_ops = {
.read_reg = brcmnand_bcma_read_reg,
.write_reg = brcmnand_bcma_write_reg,
};
static void brcmnand_bcma_prepare_data_bus(struct brcmnand_soc *soc, bool prepare,
bool is_param)
{
struct brcmnand_bcma_soc *sc = to_bcma_soc(soc);
	/*
	 * Reset the cache address to ensure we start accessing at the
	 * beginning of a sub-page.
	 */
bcma_cc_write32(sc->cc, BCMA_CC_NAND_CACHE_ADDR, 0);
}
static int brcmnand_bcma_nand_probe(struct platform_device *pdev)
{
struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
struct brcmnand_bcma_soc *soc;
soc = devm_kzalloc(&pdev->dev, sizeof(*soc), GFP_KERNEL);
if (!soc)
return -ENOMEM;
soc->cc = container_of(nflash, struct bcma_drv_cc, nflash);
soc->soc.prepare_data_bus = brcmnand_bcma_prepare_data_bus;
soc->soc.ops = &brcmnand_bcma_io_ops;
if (soc->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
dev_err(&pdev->dev, "Use bcm47xxnflash for 4706!\n");
return -ENODEV;
}
return brcmnand_probe(pdev, &soc->soc);
}
static struct platform_driver brcmnand_bcma_nand_driver = {
.probe = brcmnand_bcma_nand_probe,
.remove = brcmnand_remove,
.driver = {
.name = "bcma_brcmnand",
.pm = &brcmnand_pm_ops,
}
};
module_platform_driver(brcmnand_bcma_nand_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Broadcom");
MODULE_DESCRIPTION("NAND controller driver glue for BCMA chips");
| linux-master | drivers/mtd/nand/raw/brcmnand/bcma_nand.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2015 Broadcom Corporation
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "brcmnand.h"
static const struct of_device_id brcmstb_nand_of_match[] = {
{ .compatible = "brcm,brcmnand" },
{},
};
MODULE_DEVICE_TABLE(of, brcmstb_nand_of_match);
static int brcmstb_nand_probe(struct platform_device *pdev)
{
return brcmnand_probe(pdev, NULL);
}
static struct platform_driver brcmstb_nand_driver = {
.probe = brcmstb_nand_probe,
.remove = brcmnand_remove,
.driver = {
.name = "brcmstb_nand",
.pm = &brcmnand_pm_ops,
.of_match_table = brcmstb_nand_of_match,
}
};
module_platform_driver(brcmstb_nand_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Brian Norris");
MODULE_DESCRIPTION("NAND driver for Broadcom STB chips");
| linux-master | drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Ingenic JZ47xx NAND driver
*
* Copyright (c) 2015 Imagination Technologies
* Author: Alex Smith <[email protected]>
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/jz4780-nemc.h>
#include "ingenic_ecc.h"
#define DRV_NAME "ingenic-nand"
struct jz_soc_info {
unsigned long data_offset;
unsigned long addr_offset;
unsigned long cmd_offset;
const struct mtd_ooblayout_ops *oob_layout;
bool oob_first;
};
struct ingenic_nand_cs {
unsigned int bank;
void __iomem *base;
};
struct ingenic_nfc {
struct device *dev;
struct ingenic_ecc *ecc;
const struct jz_soc_info *soc_info;
struct nand_controller controller;
unsigned int num_banks;
struct list_head chips;
struct ingenic_nand_cs cs[];
};
struct ingenic_nand {
struct nand_chip chip;
struct list_head chip_list;
struct gpio_desc *busy_gpio;
struct gpio_desc *wp_gpio;
unsigned int reading: 1;
};
static inline struct ingenic_nand *to_ingenic_nand(struct mtd_info *mtd)
{
return container_of(mtd_to_nand(mtd), struct ingenic_nand, chip);
}
static inline struct ingenic_nfc *to_ingenic_nfc(struct nand_controller *ctrl)
{
return container_of(ctrl, struct ingenic_nfc, controller);
}
static int qi_lb60_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
if (section || !ecc->total)
return -ERANGE;
oobregion->length = ecc->total;
oobregion->offset = 12;
return 0;
}
static int qi_lb60_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
if (section)
return -ERANGE;
oobregion->length = mtd->oobsize - ecc->total - 12;
oobregion->offset = 12 + ecc->total;
return 0;
}
static const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = {
.ecc = qi_lb60_ooblayout_ecc,
.free = qi_lb60_ooblayout_free,
};
static int jz4725b_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
if (section || !ecc->total)
return -ERANGE;
oobregion->length = ecc->total;
oobregion->offset = 3;
return 0;
}
static int jz4725b_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
if (section)
return -ERANGE;
oobregion->length = mtd->oobsize - ecc->total - 3;
oobregion->offset = 3 + ecc->total;
return 0;
}
static const struct mtd_ooblayout_ops jz4725b_ooblayout_ops = {
.ecc = jz4725b_ooblayout_ecc,
.free = jz4725b_ooblayout_free,
};
static void ingenic_nand_ecc_hwctl(struct nand_chip *chip, int mode)
{
struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
nand->reading = (mode == NAND_ECC_READ);
}
static int ingenic_nand_ecc_calculate(struct nand_chip *chip, const u8 *dat,
u8 *ecc_code)
{
struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
struct ingenic_ecc_params params;
/*
* Don't need to generate the ECC when reading, the ECC engine does it
* for us as part of decoding/correction.
*/
if (nand->reading)
return 0;
params.size = nand->chip.ecc.size;
params.bytes = nand->chip.ecc.bytes;
params.strength = nand->chip.ecc.strength;
return ingenic_ecc_calculate(nfc->ecc, ¶ms, dat, ecc_code);
}
static int ingenic_nand_ecc_correct(struct nand_chip *chip, u8 *dat,
u8 *read_ecc, u8 *calc_ecc)
{
struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
struct ingenic_ecc_params params;
params.size = nand->chip.ecc.size;
params.bytes = nand->chip.ecc.bytes;
params.strength = nand->chip.ecc.strength;
return ingenic_ecc_correct(nfc->ecc, ¶ms, dat, read_ecc);
}
static int ingenic_nand_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct ingenic_nfc *nfc = to_ingenic_nfc(chip->controller);
int eccbytes;
if (chip->ecc.strength == 4) {
/* JZ4740 uses 9 bytes of ECC to correct maximum 4 errors */
chip->ecc.bytes = 9;
} else {
chip->ecc.bytes = fls((1 + 8) * chip->ecc.size) *
(chip->ecc.strength / 8);
}
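/*
* Worked example (added for illustration, values assumed): with a
* 512-byte ECC step, fls((1 + 8) * 512) = fls(4608) = 13, so a chip
* requiring 8-bit strength ends up with 13 * (8 / 8) = 13 ECC bytes
* per step; 8 correctable bits * 13 parity bits = 104 bits = 13 bytes.
*/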
switch (chip->ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_ON_HOST:
if (!nfc->ecc) {
dev_err(nfc->dev, "HW ECC selected, but ECC controller not found\n");
return -ENODEV;
}
chip->ecc.hwctl = ingenic_nand_ecc_hwctl;
chip->ecc.calculate = ingenic_nand_ecc_calculate;
chip->ecc.correct = ingenic_nand_ecc_correct;
fallthrough;
case NAND_ECC_ENGINE_TYPE_SOFT:
dev_info(nfc->dev, "using %s (strength %d, size %d, bytes %d)\n",
(nfc->ecc) ? "hardware ECC" : "software ECC",
chip->ecc.strength, chip->ecc.size, chip->ecc.bytes);
break;
case NAND_ECC_ENGINE_TYPE_NONE:
dev_info(nfc->dev, "not using ECC\n");
break;
default:
dev_err(nfc->dev, "ECC mode %d not supported\n",
chip->ecc.engine_type);
return -EINVAL;
}
/* The NAND core will generate the ECC layout for SW ECC */
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return 0;
/* Generate ECC layout. ECC codes are right aligned in the OOB area. */
eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes;
if (eccbytes > mtd->oobsize - 2) {
dev_err(nfc->dev,
"invalid ECC config: required %d ECC bytes, but only %d are available",
eccbytes, mtd->oobsize - 2);
return -EINVAL;
}
/*
* The generic layout for BBT markers will most likely overlap with our
* ECC bytes in the OOB, so move the BBT markers outside the OOB area.
*/
if (chip->bbt_options & NAND_BBT_USE_FLASH)
chip->bbt_options |= NAND_BBT_NO_OOB;
if (nfc->soc_info->oob_first)
chip->ecc.read_page = nand_read_page_hwecc_oob_first;
/* For legacy reasons we use a different layout on the qi,lb60 board. */
if (of_machine_is_compatible("qi,lb60"))
mtd_set_ooblayout(mtd, &qi_lb60_ooblayout_ops);
else if (nfc->soc_info->oob_layout)
mtd_set_ooblayout(mtd, nfc->soc_info->oob_layout);
else
mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
return 0;
}
static int ingenic_nand_exec_instr(struct nand_chip *chip,
struct ingenic_nand_cs *cs,
const struct nand_op_instr *instr)
{
struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
struct ingenic_nfc *nfc = to_ingenic_nfc(chip->controller);
unsigned int i;
switch (instr->type) {
case NAND_OP_CMD_INSTR:
writeb(instr->ctx.cmd.opcode,
cs->base + nfc->soc_info->cmd_offset);
return 0;
case NAND_OP_ADDR_INSTR:
for (i = 0; i < instr->ctx.addr.naddrs; i++)
writeb(instr->ctx.addr.addrs[i],
cs->base + nfc->soc_info->addr_offset);
return 0;
case NAND_OP_DATA_IN_INSTR:
if (instr->ctx.data.force_8bit ||
!(chip->options & NAND_BUSWIDTH_16))
ioread8_rep(cs->base + nfc->soc_info->data_offset,
instr->ctx.data.buf.in,
instr->ctx.data.len);
else
ioread16_rep(cs->base + nfc->soc_info->data_offset,
instr->ctx.data.buf.in,
instr->ctx.data.len);
return 0;
case NAND_OP_DATA_OUT_INSTR:
if (instr->ctx.data.force_8bit ||
!(chip->options & NAND_BUSWIDTH_16))
iowrite8_rep(cs->base + nfc->soc_info->data_offset,
instr->ctx.data.buf.out,
instr->ctx.data.len);
else
iowrite16_rep(cs->base + nfc->soc_info->data_offset,
instr->ctx.data.buf.out,
instr->ctx.data.len);
return 0;
case NAND_OP_WAITRDY_INSTR:
if (!nand->busy_gpio)
return nand_soft_waitrdy(chip,
instr->ctx.waitrdy.timeout_ms);
return nand_gpio_waitrdy(chip, nand->busy_gpio,
instr->ctx.waitrdy.timeout_ms);
default:
break;
}
return -EINVAL;
}
static int ingenic_nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
struct ingenic_nand_cs *cs;
unsigned int i;
int ret = 0;
if (check_only)
return 0;
cs = &nfc->cs[op->cs];
jz4780_nemc_assert(nfc->dev, cs->bank, true);
for (i = 0; i < op->ninstrs; i++) {
ret = ingenic_nand_exec_instr(chip, cs, &op->instrs[i]);
if (ret)
break;
if (op->instrs[i].delay_ns)
ndelay(op->instrs[i].delay_ns);
}
jz4780_nemc_assert(nfc->dev, cs->bank, false);
return ret;
}
static const struct nand_controller_ops ingenic_nand_controller_ops = {
.attach_chip = ingenic_nand_attach_chip,
.exec_op = ingenic_nand_exec_op,
};
static int ingenic_nand_init_chip(struct platform_device *pdev,
struct ingenic_nfc *nfc,
struct device_node *np,
unsigned int chipnr)
{
struct device *dev = &pdev->dev;
struct ingenic_nand *nand;
struct ingenic_nand_cs *cs;
struct nand_chip *chip;
struct mtd_info *mtd;
const __be32 *reg;
int ret = 0;
cs = &nfc->cs[chipnr];
reg = of_get_property(np, "reg", NULL);
if (!reg)
return -EINVAL;
cs->bank = be32_to_cpu(*reg);
jz4780_nemc_set_type(nfc->dev, cs->bank, JZ4780_NEMC_BANK_NAND);
cs->base = devm_platform_ioremap_resource(pdev, chipnr);
if (IS_ERR(cs->base))
return PTR_ERR(cs->base);
nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
if (!nand)
return -ENOMEM;
nand->busy_gpio = devm_gpiod_get_optional(dev, "rb", GPIOD_IN);
if (IS_ERR(nand->busy_gpio)) {
ret = PTR_ERR(nand->busy_gpio);
dev_err(dev, "failed to request busy GPIO: %d\n", ret);
return ret;
}
/*
* The rb-gpios semantics was undocumented and qi,lb60 (along with
* the ingenic driver) got it wrong. The active state encodes the
* NAND ready state, which is high level. Since there's no signal
* inverter on this board, it should be active-high. Let's fix that
* here for older DTs so we can re-use the generic nand_gpio_waitrdy()
* helper, and be consistent with what other drivers do.
*/
if (of_machine_is_compatible("qi,lb60") &&
gpiod_is_active_low(nand->busy_gpio))
gpiod_toggle_active_low(nand->busy_gpio);
nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
if (IS_ERR(nand->wp_gpio)) {
ret = PTR_ERR(nand->wp_gpio);
dev_err(dev, "failed to request WP GPIO: %d\n", ret);
return ret;
}
chip = &nand->chip;
mtd = nand_to_mtd(chip);
mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
cs->bank);
if (!mtd->name)
return -ENOMEM;
mtd->dev.parent = dev;
chip->options = NAND_NO_SUBPAGE_WRITE;
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->controller = &nfc->controller;
nand_set_flash_node(chip, np);
chip->controller->ops = &ingenic_nand_controller_ops;
ret = nand_scan(chip, 1);
if (ret)
return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
nand_cleanup(chip);
return ret;
}
list_add_tail(&nand->chip_list, &nfc->chips);
return 0;
}
static void ingenic_nand_cleanup_chips(struct ingenic_nfc *nfc)
{
struct ingenic_nand *ingenic_chip;
struct nand_chip *chip;
int ret;
while (!list_empty(&nfc->chips)) {
ingenic_chip = list_first_entry(&nfc->chips,
struct ingenic_nand, chip_list);
chip = &ingenic_chip->chip;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
list_del(&ingenic_chip->chip_list);
}
}
static int ingenic_nand_init_chips(struct ingenic_nfc *nfc,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np;
int i = 0;
int ret;
int num_chips = of_get_child_count(dev->of_node);
if (num_chips > nfc->num_banks) {
dev_err(dev, "found %d chips but only %d banks\n",
num_chips, nfc->num_banks);
return -EINVAL;
}
for_each_child_of_node(dev->of_node, np) {
ret = ingenic_nand_init_chip(pdev, nfc, np, i);
if (ret) {
ingenic_nand_cleanup_chips(nfc);
of_node_put(np);
return ret;
}
i++;
}
return 0;
}
static int ingenic_nand_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
unsigned int num_banks;
struct ingenic_nfc *nfc;
int ret;
num_banks = jz4780_nemc_num_banks(dev);
if (num_banks == 0) {
dev_err(dev, "no banks found\n");
return -ENODEV;
}
nfc = devm_kzalloc(dev, struct_size(nfc, cs, num_banks), GFP_KERNEL);
if (!nfc)
return -ENOMEM;
nfc->soc_info = device_get_match_data(dev);
if (!nfc->soc_info)
return -EINVAL;
/*
* Check for ECC HW before we call nand_scan_ident, to prevent us from
* having to call it again if the ECC driver returns -EPROBE_DEFER.
*/
nfc->ecc = of_ingenic_ecc_get(dev->of_node);
if (IS_ERR(nfc->ecc))
return PTR_ERR(nfc->ecc);
nfc->dev = dev;
nfc->num_banks = num_banks;
nand_controller_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
ret = ingenic_nand_init_chips(nfc, pdev);
if (ret) {
if (nfc->ecc)
ingenic_ecc_release(nfc->ecc);
return ret;
}
platform_set_drvdata(pdev, nfc);
return 0;
}
static void ingenic_nand_remove(struct platform_device *pdev)
{
struct ingenic_nfc *nfc = platform_get_drvdata(pdev);
if (nfc->ecc)
ingenic_ecc_release(nfc->ecc);
ingenic_nand_cleanup_chips(nfc);
}
static const struct jz_soc_info jz4740_soc_info = {
.data_offset = 0x00000000,
.cmd_offset = 0x00008000,
.addr_offset = 0x00010000,
.oob_first = true,
};
static const struct jz_soc_info jz4725b_soc_info = {
.data_offset = 0x00000000,
.cmd_offset = 0x00008000,
.addr_offset = 0x00010000,
.oob_layout = &jz4725b_ooblayout_ops,
};
static const struct jz_soc_info jz4780_soc_info = {
.data_offset = 0x00000000,
.cmd_offset = 0x00400000,
.addr_offset = 0x00800000,
};
static const struct of_device_id ingenic_nand_dt_match[] = {
{ .compatible = "ingenic,jz4740-nand", .data = &jz4740_soc_info },
{ .compatible = "ingenic,jz4725b-nand", .data = &jz4725b_soc_info },
{ .compatible = "ingenic,jz4780-nand", .data = &jz4780_soc_info },
{},
};
MODULE_DEVICE_TABLE(of, ingenic_nand_dt_match);
static struct platform_driver ingenic_nand_driver = {
.probe = ingenic_nand_probe,
.remove_new = ingenic_nand_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = ingenic_nand_dt_match,
},
};
module_platform_driver(ingenic_nand_driver);
MODULE_AUTHOR("Alex Smith <[email protected]>");
MODULE_AUTHOR("Harvey Hunt <[email protected]>");
MODULE_DESCRIPTION("Ingenic JZ47xx NAND driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* JZ47xx ECC common code
*
* Copyright (c) 2015 Imagination Technologies
* Author: Alex Smith <[email protected]>
*/
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include "ingenic_ecc.h"
/**
* ingenic_ecc_calculate() - calculate ECC for a data buffer
* @ecc: ECC device.
* @params: ECC parameters.
* @buf: input buffer with raw data.
* @ecc_code: output buffer with ECC.
*
* Return: 0 on success, -ETIMEDOUT if timed out while waiting for ECC
* controller.
*/
int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
struct ingenic_ecc_params *params,
const u8 *buf, u8 *ecc_code)
{
return ecc->ops->calculate(ecc, params, buf, ecc_code);
}
/**
* ingenic_ecc_correct() - detect and correct bit errors
* @ecc: ECC device.
* @params: ECC parameters.
* @buf: raw data read from the chip.
* @ecc_code: ECC read from the chip.
*
* Given the raw data and the ECC read from the NAND device, detects and
* corrects errors in the data.
*
* Return: the number of bit errors corrected, -EBADMSG if there are too many
* errors to correct or -ETIMEDOUT if we timed out waiting for the controller.
*/
int ingenic_ecc_correct(struct ingenic_ecc *ecc,
struct ingenic_ecc_params *params,
u8 *buf, u8 *ecc_code)
{
return ecc->ops->correct(ecc, params, buf, ecc_code);
}
/**
* ingenic_ecc_get() - get the ECC controller device
* @np: ECC device tree node.
*
* Gets the ECC controller device from the specified device tree node. The
* device must be released with ingenic_ecc_release() when it is no longer being
* used.
*
* Return: a pointer to ingenic_ecc, errors are encoded into the pointer.
* PTR_ERR(-EPROBE_DEFER) if the device hasn't been initialised yet.
*/
static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np)
{
struct platform_device *pdev;
struct ingenic_ecc *ecc;
pdev = of_find_device_by_node(np);
if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
if (!platform_get_drvdata(pdev)) {
put_device(&pdev->dev);
return ERR_PTR(-EPROBE_DEFER);
}
ecc = platform_get_drvdata(pdev);
clk_prepare_enable(ecc->clk);
return ecc;
}
/**
* of_ingenic_ecc_get() - get the ECC controller from a DT node
* @of_node: the node that contains an ecc-engine property.
*
* Get the ecc-engine property from the given device tree
* node and pass it to ingenic_ecc_get to do the work.
*
* Return: a pointer to ingenic_ecc, errors are encoded into the pointer.
* PTR_ERR(-EPROBE_DEFER) if the device hasn't been initialised yet.
*/
struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *of_node)
{
struct ingenic_ecc *ecc = NULL;
struct device_node *np;
np = of_parse_phandle(of_node, "ecc-engine", 0);
/*
* If the ecc-engine property is not found, check for the deprecated
* ingenic,bch-controller property
*/
if (!np)
np = of_parse_phandle(of_node, "ingenic,bch-controller", 0);
if (np) {
ecc = ingenic_ecc_get(np);
of_node_put(np);
}
return ecc;
}
/**
* ingenic_ecc_release() - release the ECC controller device
* @ecc: ECC device.
*/
void ingenic_ecc_release(struct ingenic_ecc *ecc)
{
clk_disable_unprepare(ecc->clk);
put_device(ecc->dev);
}
int ingenic_ecc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ingenic_ecc *ecc;
ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
if (!ecc)
return -ENOMEM;
ecc->ops = device_get_match_data(dev);
if (!ecc->ops)
return -EINVAL;
ecc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ecc->base))
return PTR_ERR(ecc->base);
ecc->ops->disable(ecc);
ecc->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ecc->clk)) {
dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
return PTR_ERR(ecc->clk);
}
mutex_init(&ecc->lock);
ecc->dev = dev;
platform_set_drvdata(pdev, ecc);
return 0;
}
EXPORT_SYMBOL(ingenic_ecc_probe);
| linux-master | drivers/mtd/nand/raw/ingenic/ingenic_ecc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* JZ4740 ECC controller driver
*
* Copyright (c) 2019 Paul Cercueil <[email protected]>
*
* based on jz4740-nand.c
*/
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include "ingenic_ecc.h"
#define JZ_REG_NAND_ECC_CTRL 0x00
#define JZ_REG_NAND_DATA 0x04
#define JZ_REG_NAND_PAR0 0x08
#define JZ_REG_NAND_PAR1 0x0C
#define JZ_REG_NAND_PAR2 0x10
#define JZ_REG_NAND_IRQ_STAT 0x14
#define JZ_REG_NAND_IRQ_CTRL 0x18
#define JZ_REG_NAND_ERR(x) (0x1C + ((x) << 2))
#define JZ_NAND_ECC_CTRL_PAR_READY BIT(4)
#define JZ_NAND_ECC_CTRL_ENCODING BIT(3)
#define JZ_NAND_ECC_CTRL_RS BIT(2)
#define JZ_NAND_ECC_CTRL_RESET BIT(1)
#define JZ_NAND_ECC_CTRL_ENABLE BIT(0)
#define JZ_NAND_STATUS_ERR_COUNT (BIT(31) | BIT(30) | BIT(29))
#define JZ_NAND_STATUS_PAD_FINISH BIT(4)
#define JZ_NAND_STATUS_DEC_FINISH BIT(3)
#define JZ_NAND_STATUS_ENC_FINISH BIT(2)
#define JZ_NAND_STATUS_UNCOR_ERROR BIT(1)
#define JZ_NAND_STATUS_ERROR BIT(0)
static const uint8_t empty_block_ecc[] = {
0xcd, 0x9d, 0x90, 0x58, 0xf4, 0x8b, 0xff, 0xb7, 0x6f
};
static void jz4740_ecc_reset(struct ingenic_ecc *ecc, bool calc_ecc)
{
uint32_t reg;
/* Clear interrupt status */
writel(0, ecc->base + JZ_REG_NAND_IRQ_STAT);
/* Initialize and enable ECC hardware */
reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
reg |= JZ_NAND_ECC_CTRL_RESET;
reg |= JZ_NAND_ECC_CTRL_ENABLE;
reg |= JZ_NAND_ECC_CTRL_RS;
if (calc_ecc) /* calculate ECC from data */
reg |= JZ_NAND_ECC_CTRL_ENCODING;
else /* correct data from ECC */
reg &= ~JZ_NAND_ECC_CTRL_ENCODING;
writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
}
static int jz4740_ecc_calculate(struct ingenic_ecc *ecc,
struct ingenic_ecc_params *params,
const u8 *buf, u8 *ecc_code)
{
uint32_t reg, status;
unsigned int timeout = 1000;
int i;
jz4740_ecc_reset(ecc, true);
do {
status = readl(ecc->base + JZ_REG_NAND_IRQ_STAT);
} while (!(status & JZ_NAND_STATUS_ENC_FINISH) && --timeout);
if (timeout == 0)
return -ETIMEDOUT;
reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
for (i = 0; i < params->bytes; ++i)
ecc_code[i] = readb(ecc->base + JZ_REG_NAND_PAR0 + i);
/*
* If the written data is completely 0xff, we also want to write 0xff as
* ECC, otherwise we will get in trouble when doing subpage writes.
*/
if (memcmp(ecc_code, empty_block_ecc, sizeof(empty_block_ecc)) == 0)
memset(ecc_code, 0xff, sizeof(empty_block_ecc));
return 0;
}
static void jz_nand_correct_data(uint8_t *buf, int index, int mask)
{
int offset = index & 0x7;
uint16_t data;
index += (index >> 3);
data = buf[index];
data |= buf[index + 1] << 8;
mask ^= (data >> offset) & 0x1ff;
data &= ~(0x1ff << offset);
data |= (mask << offset);
buf[index] = data & 0xff;
buf[index + 1] = (data >> 8) & 0xff;
}
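/*
* Worked example (added for illustration, not in the original code):
* the RS code works on 9-bit symbols, so symbol n starts at bit
* 9 * n = 8 * n + n, i.e. byte (n + n / 8), bit (n % 8). For index = 10
* this gives offset = 2 and byte index 11: the symbol spans bits 2..7
* of buf[11] and bits 0..2 of buf[12], and the 9-bit error mask from
* the controller is XORed into exactly that window.
*/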
static int jz4740_ecc_correct(struct ingenic_ecc *ecc,
struct ingenic_ecc_params *params,
u8 *buf, u8 *ecc_code)
{
int i, error_count, index;
uint32_t reg, status, error;
unsigned int timeout = 1000;
jz4740_ecc_reset(ecc, false);
for (i = 0; i < params->bytes; ++i)
writeb(ecc_code[i], ecc->base + JZ_REG_NAND_PAR0 + i);
reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
reg |= JZ_NAND_ECC_CTRL_PAR_READY;
writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
do {
status = readl(ecc->base + JZ_REG_NAND_IRQ_STAT);
} while (!(status & JZ_NAND_STATUS_DEC_FINISH) && --timeout);
if (timeout == 0)
return -ETIMEDOUT;
reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
if (status & JZ_NAND_STATUS_ERROR) {
if (status & JZ_NAND_STATUS_UNCOR_ERROR)
return -EBADMSG;
error_count = (status & JZ_NAND_STATUS_ERR_COUNT) >> 29;
for (i = 0; i < error_count; ++i) {
error = readl(ecc->base + JZ_REG_NAND_ERR(i));
index = ((error >> 16) & 0x1ff) - 1;
if (index >= 0 && index < params->size)
jz_nand_correct_data(buf, index, error & 0x1ff);
}
return error_count;
}
return 0;
}
static void jz4740_ecc_disable(struct ingenic_ecc *ecc)
{
u32 reg;
writel(0, ecc->base + JZ_REG_NAND_IRQ_STAT);
reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
}
static const struct ingenic_ecc_ops jz4740_ecc_ops = {
.disable = jz4740_ecc_disable,
.calculate = jz4740_ecc_calculate,
.correct = jz4740_ecc_correct,
};
static const struct of_device_id jz4740_ecc_dt_match[] = {
{ .compatible = "ingenic,jz4740-ecc", .data = &jz4740_ecc_ops },
{},
};
MODULE_DEVICE_TABLE(of, jz4740_ecc_dt_match);
static struct platform_driver jz4740_ecc_driver = {
.probe = ingenic_ecc_probe,
.driver = {
.name = "jz4740-ecc",
.of_match_table = jz4740_ecc_dt_match,
},
};
module_platform_driver(jz4740_ecc_driver);
MODULE_AUTHOR("Paul Cercueil <[email protected]>");
MODULE_DESCRIPTION("Ingenic JZ4740 ECC controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mtd/nand/raw/ingenic/jz4740_ecc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* JZ4725B BCH controller driver
*
* Copyright (C) 2019 Paul Cercueil <[email protected]>
*
* Based on jz4780_bch.c
*/
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include "ingenic_ecc.h"
#define BCH_BHCR 0x0
#define BCH_BHCSR 0x4
#define BCH_BHCCR 0x8
#define BCH_BHCNT 0xc
#define BCH_BHDR 0x10
#define BCH_BHPAR0 0x14
#define BCH_BHERR0 0x28
#define BCH_BHINT 0x24
#define BCH_BHINTES 0x3c
#define BCH_BHINTEC 0x40
#define BCH_BHINTE 0x38
#define BCH_BHCR_ENCE BIT(3)
#define BCH_BHCR_BSEL BIT(2)
#define BCH_BHCR_INIT BIT(1)
#define BCH_BHCR_BCHE BIT(0)
#define BCH_BHCNT_DEC_COUNT_SHIFT 16
#define BCH_BHCNT_DEC_COUNT_MASK (0x3ff << BCH_BHCNT_DEC_COUNT_SHIFT)
#define BCH_BHCNT_ENC_COUNT_SHIFT 0
#define BCH_BHCNT_ENC_COUNT_MASK (0x3ff << BCH_BHCNT_ENC_COUNT_SHIFT)
#define BCH_BHERR_INDEX0_SHIFT 0
#define BCH_BHERR_INDEX0_MASK (0x1fff << BCH_BHERR_INDEX0_SHIFT)
#define BCH_BHERR_INDEX1_SHIFT 16
#define BCH_BHERR_INDEX1_MASK (0x1fff << BCH_BHERR_INDEX1_SHIFT)
#define BCH_BHINT_ERRC_SHIFT 28
#define BCH_BHINT_ERRC_MASK (0xf << BCH_BHINT_ERRC_SHIFT)
#define BCH_BHINT_TERRC_SHIFT 16
#define BCH_BHINT_TERRC_MASK (0x7f << BCH_BHINT_TERRC_SHIFT)
#define BCH_BHINT_ALL_0 BIT(5)
#define BCH_BHINT_ALL_F BIT(4)
#define BCH_BHINT_DECF BIT(3)
#define BCH_BHINT_ENCF BIT(2)
#define BCH_BHINT_UNCOR BIT(1)
#define BCH_BHINT_ERR BIT(0)
/* Timeout for BCH calculation/correction. */
#define BCH_TIMEOUT_US 100000
static inline void jz4725b_bch_config_set(struct ingenic_ecc *bch, u32 cfg)
{
writel(cfg, bch->base + BCH_BHCSR);
}
static inline void jz4725b_bch_config_clear(struct ingenic_ecc *bch, u32 cfg)
{
writel(cfg, bch->base + BCH_BHCCR);
}
static int jz4725b_bch_reset(struct ingenic_ecc *bch,
struct ingenic_ecc_params *params, bool calc_ecc)
{
u32 reg, max_value;
/* Clear interrupt status. */
writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
/* Initialise and enable BCH. */
jz4725b_bch_config_clear(bch, 0x1f);
jz4725b_bch_config_set(bch, BCH_BHCR_BCHE);
if (params->strength == 8)
jz4725b_bch_config_set(bch, BCH_BHCR_BSEL);
else
jz4725b_bch_config_clear(bch, BCH_BHCR_BSEL);
if (calc_ecc) /* calculate ECC from data */
jz4725b_bch_config_set(bch, BCH_BHCR_ENCE);
else /* correct data from ECC */
jz4725b_bch_config_clear(bch, BCH_BHCR_ENCE);
jz4725b_bch_config_set(bch, BCH_BHCR_INIT);
max_value = BCH_BHCNT_ENC_COUNT_MASK >> BCH_BHCNT_ENC_COUNT_SHIFT;
if (params->size > max_value)
return -EINVAL;
max_value = BCH_BHCNT_DEC_COUNT_MASK >> BCH_BHCNT_DEC_COUNT_SHIFT;
if (params->size + params->bytes > max_value)
return -EINVAL;
/* Set up BCH count register. */
reg = params->size << BCH_BHCNT_ENC_COUNT_SHIFT;
reg |= (params->size + params->bytes) << BCH_BHCNT_DEC_COUNT_SHIFT;
writel(reg, bch->base + BCH_BHCNT);
return 0;
}
static void jz4725b_bch_disable(struct ingenic_ecc *bch)
{
/* Clear interrupts */
writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
/* Disable the hardware */
jz4725b_bch_config_clear(bch, BCH_BHCR_BCHE);
}
static void jz4725b_bch_write_data(struct ingenic_ecc *bch, const u8 *buf,
size_t size)
{
while (size--)
writeb(*buf++, bch->base + BCH_BHDR);
}
static void jz4725b_bch_read_parity(struct ingenic_ecc *bch, u8 *buf,
size_t size)
{
size_t size32 = size / sizeof(u32);
size_t size8 = size % sizeof(u32);
u32 *dest32;
u8 *dest8;
u32 val, offset = 0;
dest32 = (u32 *)buf;
while (size32--) {
*dest32++ = readl_relaxed(bch->base + BCH_BHPAR0 + offset);
offset += sizeof(u32);
}
dest8 = (u8 *)dest32;
val = readl_relaxed(bch->base + BCH_BHPAR0 + offset);
switch (size8) {
case 3:
dest8[2] = (val >> 16) & 0xff;
fallthrough;
case 2:
dest8[1] = (val >> 8) & 0xff;
fallthrough;
case 1:
dest8[0] = val & 0xff;
break;
}
}
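/*
* Example (added for illustration, size assumed): if params->bytes were
* 7, the loop above copies one full 32-bit parity word, and the switch
* then stores only the low three bytes of the next parity register, so
* the output buffer is never written past the requested length.
*/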
static int jz4725b_bch_wait_complete(struct ingenic_ecc *bch, unsigned int irq,
u32 *status)
{
u32 reg;
int ret;
/*
* While we could use interrupts here and sleep until the operation
* completes, the controller works fairly quickly (usually a few
* microseconds) and so the overhead of sleeping until we get an
* interrupt quite noticeably decreases performance.
*/
ret = readl_relaxed_poll_timeout(bch->base + BCH_BHINT, reg,
reg & irq, 0, BCH_TIMEOUT_US);
if (ret)
return ret;
if (status)
*status = reg;
writel(reg, bch->base + BCH_BHINT);
return 0;
}
static int jz4725b_calculate(struct ingenic_ecc *bch,
struct ingenic_ecc_params *params,
const u8 *buf, u8 *ecc_code)
{
int ret;
mutex_lock(&bch->lock);
ret = jz4725b_bch_reset(bch, params, true);
if (ret) {
dev_err(bch->dev, "Unable to init BCH with given parameters\n");
goto out_disable;
}
jz4725b_bch_write_data(bch, buf, params->size);
ret = jz4725b_bch_wait_complete(bch, BCH_BHINT_ENCF, NULL);
if (ret) {
dev_err(bch->dev, "timed out while calculating ECC\n");
goto out_disable;
}
jz4725b_bch_read_parity(bch, ecc_code, params->bytes);
out_disable:
jz4725b_bch_disable(bch);
mutex_unlock(&bch->lock);
return ret;
}
static int jz4725b_correct(struct ingenic_ecc *bch,
struct ingenic_ecc_params *params,
u8 *buf, u8 *ecc_code)
{
u32 reg, errors, bit;
unsigned int i;
int ret;
mutex_lock(&bch->lock);
ret = jz4725b_bch_reset(bch, params, false);
if (ret) {
dev_err(bch->dev, "Unable to init BCH with given parameters\n");
goto out;
}
jz4725b_bch_write_data(bch, buf, params->size);
jz4725b_bch_write_data(bch, ecc_code, params->bytes);
ret = jz4725b_bch_wait_complete(bch, BCH_BHINT_DECF, ®);
if (ret) {
dev_err(bch->dev, "timed out while correcting data\n");
goto out;
}
if (reg & (BCH_BHINT_ALL_F | BCH_BHINT_ALL_0)) {
/* Data and ECC is all 0xff or 0x00 - nothing to correct */
ret = 0;
goto out;
}
if (reg & BCH_BHINT_UNCOR) {
/* Uncorrectable ECC error */
ret = -EBADMSG;
goto out;
}
errors = (reg & BCH_BHINT_ERRC_MASK) >> BCH_BHINT_ERRC_SHIFT;
/* Correct any detected errors. */
for (i = 0; i < errors; i++) {
if (i & 1) {
bit = (reg & BCH_BHERR_INDEX1_MASK) >> BCH_BHERR_INDEX1_SHIFT;
} else {
reg = readl(bch->base + BCH_BHERR0 + (i * 4));
bit = (reg & BCH_BHERR_INDEX0_MASK) >> BCH_BHERR_INDEX0_SHIFT;
}
buf[(bit >> 3)] ^= BIT(bit & 0x7);
}
out:
jz4725b_bch_disable(bch);
mutex_unlock(&bch->lock);
return ret;
}
static const struct ingenic_ecc_ops jz4725b_bch_ops = {
.disable = jz4725b_bch_disable,
.calculate = jz4725b_calculate,
.correct = jz4725b_correct,
};
static const struct of_device_id jz4725b_bch_dt_match[] = {
{ .compatible = "ingenic,jz4725b-bch", .data = &jz4725b_bch_ops },
{},
};
MODULE_DEVICE_TABLE(of, jz4725b_bch_dt_match);
static struct platform_driver jz4725b_bch_driver = {
.probe = ingenic_ecc_probe,
.driver = {
.name = "jz4725b-bch",
.of_match_table = jz4725b_bch_dt_match,
},
};
module_platform_driver(jz4725b_bch_driver);
MODULE_AUTHOR("Paul Cercueil <[email protected]>");
MODULE_DESCRIPTION("Ingenic JZ4725B BCH controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mtd/nand/raw/ingenic/jz4725b_bch.c |
// SPDX-License-Identifier: GPL-2.0
/*
* JZ4780 BCH controller driver
*
* Copyright (c) 2015 Imagination Technologies
* Author: Alex Smith <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include "ingenic_ecc.h"
#define BCH_BHCR 0x0
#define BCH_BHCCR 0x8
#define BCH_BHCNT 0xc
#define BCH_BHDR 0x10
#define BCH_BHPAR0 0x14
#define BCH_BHERR0 0x84
#define BCH_BHINT 0x184
#define BCH_BHINTES 0x188
#define BCH_BHINTEC 0x18c
#define BCH_BHINTE 0x190
#define BCH_BHCR_BSEL_SHIFT 4
#define BCH_BHCR_BSEL_MASK (0x7f << BCH_BHCR_BSEL_SHIFT)
#define BCH_BHCR_ENCE BIT(2)
#define BCH_BHCR_INIT BIT(1)
#define BCH_BHCR_BCHE BIT(0)
#define BCH_BHCNT_PARITYSIZE_SHIFT 16
#define BCH_BHCNT_PARITYSIZE_MASK (0x7f << BCH_BHCNT_PARITYSIZE_SHIFT)
#define BCH_BHCNT_BLOCKSIZE_SHIFT 0
#define BCH_BHCNT_BLOCKSIZE_MASK (0x7ff << BCH_BHCNT_BLOCKSIZE_SHIFT)
#define BCH_BHERR_MASK_SHIFT 16
#define BCH_BHERR_MASK_MASK (0xffff << BCH_BHERR_MASK_SHIFT)
#define BCH_BHERR_INDEX_SHIFT 0
#define BCH_BHERR_INDEX_MASK (0x7ff << BCH_BHERR_INDEX_SHIFT)
#define BCH_BHINT_ERRC_SHIFT 24
#define BCH_BHINT_ERRC_MASK (0x7f << BCH_BHINT_ERRC_SHIFT)
#define BCH_BHINT_TERRC_SHIFT 16
#define BCH_BHINT_TERRC_MASK (0x7f << BCH_BHINT_TERRC_SHIFT)
#define BCH_BHINT_DECF BIT(3)
#define BCH_BHINT_ENCF BIT(2)
#define BCH_BHINT_UNCOR BIT(1)
#define BCH_BHINT_ERR BIT(0)
#define BCH_CLK_RATE (200 * 1000 * 1000)
/* Timeout for BCH calculation/correction. */
#define BCH_TIMEOUT_US 100000
static void jz4780_bch_reset(struct ingenic_ecc *bch,
struct ingenic_ecc_params *params, bool encode)
{
u32 reg;
/* Clear interrupt status. */
writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
/* Set up BCH count register. */
reg = params->size << BCH_BHCNT_BLOCKSIZE_SHIFT;
reg |= params->bytes << BCH_BHCNT_PARITYSIZE_SHIFT;
writel(reg, bch->base + BCH_BHCNT);
/* Initialise and enable BCH. */
reg = BCH_BHCR_BCHE | BCH_BHCR_INIT;
reg |= params->strength << BCH_BHCR_BSEL_SHIFT;
if (encode)
reg |= BCH_BHCR_ENCE;
writel(reg, bch->base + BCH_BHCR);
}
static void jz4780_bch_disable(struct ingenic_ecc *bch)
{
writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
writel(BCH_BHCR_BCHE, bch->base + BCH_BHCCR);
}
static void jz4780_bch_write_data(struct ingenic_ecc *bch, const void *buf,
size_t size)
{
size_t size32 = size / sizeof(u32);
size_t size8 = size % sizeof(u32);
const u32 *src32;
const u8 *src8;
src32 = (const u32 *)buf;
while (size32--)
writel(*src32++, bch->base + BCH_BHDR);
src8 = (const u8 *)src32;
while (size8--)
writeb(*src8++, bch->base + BCH_BHDR);
}
static void jz4780_bch_read_parity(struct ingenic_ecc *bch, void *buf,
size_t size)
{
size_t size32 = size / sizeof(u32);
size_t size8 = size % sizeof(u32);
u32 *dest32;
u8 *dest8;
u32 val, offset = 0;
dest32 = (u32 *)buf;
while (size32--) {
*dest32++ = readl(bch->base + BCH_BHPAR0 + offset);
offset += sizeof(u32);
}
dest8 = (u8 *)dest32;
val = readl(bch->base + BCH_BHPAR0 + offset);
switch (size8) {
case 3:
dest8[2] = (val >> 16) & 0xff;
fallthrough;
case 2:
dest8[1] = (val >> 8) & 0xff;
fallthrough;
case 1:
dest8[0] = val & 0xff;
break;
}
}
static bool jz4780_bch_wait_complete(struct ingenic_ecc *bch, unsigned int irq,
u32 *status)
{
u32 reg;
int ret;
/*
* While we could use interrupts here and sleep until the operation
* completes, the controller works fairly quickly (usually a few
* microseconds) and so the overhead of sleeping until we get an
* interrupt quite noticeably decreases performance.
*/
ret = readl_poll_timeout(bch->base + BCH_BHINT, reg,
(reg & irq) == irq, 0, BCH_TIMEOUT_US);
if (ret)
return false;
if (status)
*status = reg;
writel(reg, bch->base + BCH_BHINT);
return true;
}
static int jz4780_calculate(struct ingenic_ecc *bch,
struct ingenic_ecc_params *params,
const u8 *buf, u8 *ecc_code)
{
int ret = 0;
mutex_lock(&bch->lock);
jz4780_bch_reset(bch, params, true);
jz4780_bch_write_data(bch, buf, params->size);
if (jz4780_bch_wait_complete(bch, BCH_BHINT_ENCF, NULL)) {
jz4780_bch_read_parity(bch, ecc_code, params->bytes);
} else {
dev_err(bch->dev, "timed out while calculating ECC\n");
ret = -ETIMEDOUT;
}
jz4780_bch_disable(bch);
mutex_unlock(&bch->lock);
return ret;
}
static int jz4780_correct(struct ingenic_ecc *bch,
struct ingenic_ecc_params *params,
u8 *buf, u8 *ecc_code)
{
u32 reg, mask, index;
int i, ret, count;
mutex_lock(&bch->lock);
jz4780_bch_reset(bch, params, false);
jz4780_bch_write_data(bch, buf, params->size);
jz4780_bch_write_data(bch, ecc_code, params->bytes);
if (!jz4780_bch_wait_complete(bch, BCH_BHINT_DECF, ®)) {
dev_err(bch->dev, "timed out while correcting data\n");
ret = -ETIMEDOUT;
goto out;
}
if (reg & BCH_BHINT_UNCOR) {
dev_warn(bch->dev, "uncorrectable ECC error\n");
ret = -EBADMSG;
goto out;
}
/* Correct any detected errors. */
if (reg & BCH_BHINT_ERR) {
count = (reg & BCH_BHINT_ERRC_MASK) >> BCH_BHINT_ERRC_SHIFT;
ret = (reg & BCH_BHINT_TERRC_MASK) >> BCH_BHINT_TERRC_SHIFT;
for (i = 0; i < count; i++) {
reg = readl(bch->base + BCH_BHERR0 + (i * 4));
mask = (reg & BCH_BHERR_MASK_MASK) >>
BCH_BHERR_MASK_SHIFT;
index = (reg & BCH_BHERR_INDEX_MASK) >>
BCH_BHERR_INDEX_SHIFT;
buf[(index * 2) + 0] ^= mask;
buf[(index * 2) + 1] ^= mask >> 8;
}
} else {
ret = 0;
}
out:
jz4780_bch_disable(bch);
mutex_unlock(&bch->lock);
return ret;
}
static int jz4780_bch_probe(struct platform_device *pdev)
{
struct ingenic_ecc *bch;
int ret;
ret = ingenic_ecc_probe(pdev);
if (ret)
return ret;
bch = platform_get_drvdata(pdev);
clk_set_rate(bch->clk, BCH_CLK_RATE);
return 0;
}
static const struct ingenic_ecc_ops jz4780_bch_ops = {
.disable = jz4780_bch_disable,
.calculate = jz4780_calculate,
.correct = jz4780_correct,
};
static const struct of_device_id jz4780_bch_dt_match[] = {
{ .compatible = "ingenic,jz4780-bch", .data = &jz4780_bch_ops },
{},
};
MODULE_DEVICE_TABLE(of, jz4780_bch_dt_match);
static struct platform_driver jz4780_bch_driver = {
.probe = jz4780_bch_probe,
.driver = {
.name = "jz4780-bch",
.of_match_table = jz4780_bch_dt_match,
},
};
module_platform_driver(jz4780_bch_driver);
MODULE_AUTHOR("Alex Smith <[email protected]>");
MODULE_AUTHOR("Harvey Hunt <[email protected]>");
MODULE_DESCRIPTION("Ingenic JZ4780 BCH error correction driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mtd/nand/raw/ingenic/jz4780_bch.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale GPMI NAND Flash Driver
*
* Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
* Copyright (C) 2008 Embedded Alley Solutions, Inc.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma/mxs-dma.h>
#include "gpmi-nand.h"
#include "gpmi-regs.h"
#include "bch-regs.h"
/* Resource names for the GPMI NAND driver. */
#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
/* Converts time to clock cycles */
#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
#define MXS_SET_ADDR 0x4
#define MXS_CLR_ADDR 0x8
/*
* Clear the bit and poll it cleared. This is usually called with
* a reset address and mask being either SFTRST(bit 31) or CLKGATE
* (bit 30).
*/
static int clear_poll_bit(void __iomem *addr, u32 mask)
{
int timeout = 0x400;
/* clear the bit */
writel(mask, addr + MXS_CLR_ADDR);
/*
* SFTRST needs 3 GPMI clocks to settle; the reference manual
* recommends waiting 1us.
*/
udelay(1);
/* poll the bit becoming clear */
while ((readl(addr) & mask) && --timeout)
/* nothing */;
return !timeout;
}
#define MODULE_CLKGATE (1 << 30)
#define MODULE_SFTRST (1 << 31)
/*
* The current mxs_reset_block() will do two things:
* [1] enable the module.
* [2] reset the module.
*
* In most of the cases, it's ok.
* But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
* If you try to soft reset the BCH block, it becomes unusable until
* the next hard reset. This case occurs in the NAND boot mode. When the board
* boots by NAND, the ROM of the chip will initialize the BCH blocks itself.
* So if the driver tries to reset the BCH again, the BCH will not work anymore.
* You will see a DMA timeout in this case. The bug has been fixed
* in the following chips, such as MX28.
*
* To avoid this bug, just add a new parameter `just_enable` for
* the mxs_reset_block(), and rewrite it here.
*/
static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
int ret;
int timeout = 0x400;
/* clear and poll SFTRST */
ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
if (unlikely(ret))
goto error;
/* clear CLKGATE */
writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
if (!just_enable) {
/* set SFTRST to reset the block */
writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
udelay(1);
/* poll CLKGATE becoming set */
while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
/* nothing */;
if (unlikely(!timeout))
goto error;
}
/* clear and poll SFTRST */
ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
if (unlikely(ret))
goto error;
/* clear and poll CLKGATE */
ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
if (unlikely(ret))
goto error;
return 0;
error:
pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
return -ETIMEDOUT;
}
static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
{
struct clk *clk;
int ret;
int i;
for (i = 0; i < GPMI_CLK_MAX; i++) {
clk = this->resources.clock[i];
if (!clk)
break;
if (v) {
ret = clk_prepare_enable(clk);
if (ret)
goto err_clk;
} else {
clk_disable_unprepare(clk);
}
}
return 0;
err_clk:
for (; i > 0; i--)
clk_disable_unprepare(this->resources.clock[i - 1]);
return ret;
}
static int gpmi_init(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
int ret;
ret = pm_runtime_resume_and_get(this->dev);
if (ret < 0)
return ret;
ret = gpmi_reset_block(r->gpmi_regs, false);
if (ret)
goto err_out;
/*
* Reset BCH here, too. We got failures otherwise :(
* See later BCH reset for explanation of MX23 and MX28 handling
*/
ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
if (ret)
goto err_out;
/* Choose NAND mode. */
writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
/* Set the IRQ polarity. */
writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
r->gpmi_regs + HW_GPMI_CTRL1_SET);
/* Disable Write-Protection. */
writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
/* Select BCH ECC. */
writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
/*
* Decouple the chip select from dma channel. We use dma0 for all
* the chips, force all NAND RDY_BUSY inputs to be sourced from
* RDY_BUSY0.
*/
writel(BM_GPMI_CTRL1_DECOUPLE_CS | BM_GPMI_CTRL1_GANGED_RDYBUSY,
r->gpmi_regs + HW_GPMI_CTRL1_SET);
err_out:
pm_runtime_mark_last_busy(this->dev);
pm_runtime_put_autosuspend(this->dev);
return ret;
}
/* This function is very useful. It is called only when a bug occurs. */
static void gpmi_dump_info(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
struct bch_geometry *geo = &this->bch_geometry;
u32 reg;
int i;
dev_err(this->dev, "Show GPMI registers :\n");
for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
reg = readl(r->gpmi_regs + i * 0x10);
dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
}
/* start to print out the BCH info */
dev_err(this->dev, "Show BCH registers :\n");
for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
reg = readl(r->bch_regs + i * 0x10);
dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
}
dev_err(this->dev, "BCH Geometry :\n"
"GF length : %u\n"
"ECC Strength : %u\n"
"Page Size in Bytes : %u\n"
"Metadata Size in Bytes : %u\n"
"ECC0 Chunk Size in Bytes: %u\n"
"ECCn Chunk Size in Bytes: %u\n"
"ECC Chunk Count : %u\n"
"Payload Size in Bytes : %u\n"
"Auxiliary Size in Bytes: %u\n"
"Auxiliary Status Offset: %u\n"
"Block Mark Byte Offset : %u\n"
"Block Mark Bit Offset : %u\n",
geo->gf_len,
geo->ecc_strength,
geo->page_size,
geo->metadata_size,
geo->ecc0_chunk_size,
geo->eccn_chunk_size,
geo->ecc_chunk_count,
geo->payload_size,
geo->auxiliary_size,
geo->auxiliary_status_offset,
geo->block_mark_byte_offset,
geo->block_mark_bit_offset);
}
static bool gpmi_check_ecc(struct gpmi_nand_data *this)
{
struct nand_chip *chip = &this->nand;
struct bch_geometry *geo = &this->bch_geometry;
struct nand_device *nand = &chip->base;
struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
conf->step_size = geo->eccn_chunk_size;
conf->strength = geo->ecc_strength;
/* Do the sanity check. */
if (GPMI_IS_MXS(this)) {
/* The mx23/mx28 only support the GF13. */
if (geo->gf_len == 14)
return false;
}
if (geo->ecc_strength > this->devdata->bch_max_ecc_strength)
return false;
if (!nand_ecc_is_strong_enough(nand))
return false;
return true;
}
/* check if the bbm is located in a data chunk rather than an ecc chunk */
static bool bbm_in_data_chunk(struct gpmi_nand_data *this,
unsigned int *chunk_num)
{
struct bch_geometry *geo = &this->bch_geometry;
struct nand_chip *chip = &this->nand;
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int i, j;
if (geo->ecc0_chunk_size != geo->eccn_chunk_size) {
dev_err(this->dev,
"The size of ecc0_chunk must equal to eccn_chunk\n");
return false;
}
i = (mtd->writesize * 8 - geo->metadata_size * 8) /
(geo->gf_len * geo->ecc_strength +
geo->eccn_chunk_size * 8);
j = (mtd->writesize * 8 - geo->metadata_size * 8) -
(geo->gf_len * geo->ecc_strength +
geo->eccn_chunk_size * 8) * i;
if (j < geo->eccn_chunk_size * 8) {
*chunk_num = i+1;
dev_dbg(this->dev, "Set ecc to %d and bbm in chunk %d\n",
geo->ecc_strength, *chunk_num);
return true;
}
return false;
}
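/*
* Illustrative numbers (assumed, not from the original code): with a
* hypothetical 4096-byte page, metadata_size = 10, gf_len = 14,
* ecc_strength = 16 and eccn_chunk_size = 1024, each "data + ECC" slice
* costs 14 * 16 + 1024 * 8 = 8416 bits. (4096 * 8 - 80) / 8416 = 3 with
* a remainder of 7440 bits, which is less than 8192, so the bad block
* marker lands in data chunk 4 and the function returns true.
*/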
/*
* If we can get the ECC information from the NAND chip, we do not
* need to calculate it ourselves.
*
* We may have available oob space in this case.
*/
static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
unsigned int ecc_strength,
unsigned int ecc_step)
{
struct bch_geometry *geo = &this->bch_geometry;
struct nand_chip *chip = &this->nand;
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int block_mark_bit_offset;
switch (ecc_step) {
case SZ_512:
geo->gf_len = 13;
break;
case SZ_1K:
geo->gf_len = 14;
break;
default:
dev_err(this->dev,
"unsupported nand chip. ecc bits : %d, ecc size : %d\n",
nanddev_get_ecc_requirements(&chip->base)->strength,
nanddev_get_ecc_requirements(&chip->base)->step_size);
return -EINVAL;
}
geo->ecc0_chunk_size = ecc_step;
geo->eccn_chunk_size = ecc_step;
geo->ecc_strength = round_up(ecc_strength, 2);
if (!gpmi_check_ecc(this))
return -EINVAL;
/* Keep the C >= O */
if (geo->eccn_chunk_size < mtd->oobsize) {
dev_err(this->dev,
"unsupported nand chip. ecc size: %d, oob size : %d\n",
ecc_step, mtd->oobsize);
return -EINVAL;
}
/* The default value, see comment in the legacy_set_geometry(). */
geo->metadata_size = 10;
geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size;
/*
* Now, the NAND chip with 2K page(data chunk is 512byte) shows below:
*
* | P |
* |<----------------------------------------------------->|
* | |
* | (Block Mark) |
* | P' | | | |
* |<-------------------------------------------->| D | | O' |
* | |<---->| |<--->|
* V V V V V
* +---+----------+-+----------+-+----------+-+----------+-+-----+
* | M | data |E| data |E| data |E| data |E| |
* +---+----------+-+----------+-+----------+-+----------+-+-----+
* ^ ^
* | O |
* |<------------>|
* | |
*
* P : the page size for BCH module.
* E : The ECC strength.
* G : the length of Galois Field.
* N : the number of ECC chunks per page.
* M : the metadata size per page.
* C : the ecc chunk size, aka the "data" above.
* P': the nand chip's page size.
* O : the nand chip's oob size.
* O': the free oob.
*
* The formula for P is :
*
* E * G * N
* P = ------------ + P' + M
* 8
*
* The position of block mark moves forward in the ECC-based view
* of page, and the delta is:
*
* E * G * (N - 1)
* D = (---------------- + M)
* 8
*
* Please see the comment in legacy_set_geometry().
* With the condition C >= O, we can still get the same result.
* So the bit position of the physical block mark within the ECC-based
* view of the page is :
* (P' - D) * 8
*/
geo->page_size = mtd->writesize + geo->metadata_size +
(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
geo->payload_size = mtd->writesize;
geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
+ ALIGN(geo->ecc_chunk_count, 4);
if (!this->swap_block_mark)
return 0;
/* For bit swap. */
block_mark_bit_offset = mtd->writesize * 8 -
(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+ geo->metadata_size * 8);
geo->block_mark_byte_offset = block_mark_bit_offset / 8;
geo->block_mark_bit_offset = block_mark_bit_offset % 8;
return 0;
}
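/*
* Example with assumed numbers (illustration only): a 2048-byte page
* with a 512-byte ECC step and a requested strength of 4 gives
* gf_len = 13, ecc_chunk_count = 4 and
* page_size = 2048 + 10 + (13 * 4 * 4) / 8 = 2084 bytes of raw data
* handed to the BCH block per page.
*/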
/*
* Calculate the ECC strength by hand:
* E : The ECC strength.
* G : the length of Galois Field.
* N : the number of ECC chunks per page.
* O : the oobsize of the NAND chip.
* M : the metadata size per page.
*
* The formula is :
* E * G * N
* ------------ <= (O - M)
* 8
*
* So, we get E by:
* (O - M) * 8
* E <= -------------
* G * N
*/
static inline int get_ecc_strength(struct gpmi_nand_data *this)
{
struct bch_geometry *geo = &this->bch_geometry;
struct mtd_info *mtd = nand_to_mtd(&this->nand);
int ecc_strength;
ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
/ (geo->gf_len * geo->ecc_chunk_count);
/* Round down to an even number. */
return round_down(ecc_strength, 2);
}
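/*
* Worked example (illustrative values, not from the original code): for
* a hypothetical chip with oobsize = 224, metadata_size = 10,
* gf_len = 13 and ecc_chunk_count = 8, (224 - 10) * 8 = 1712 parity
* bits fit in the OOB; dividing by gf_len * ecc_chunk_count = 104
* truncates to 16, and round_down(16, 2) keeps the even value 16.
*/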
static int set_geometry_for_large_oob(struct gpmi_nand_data *this)
{
struct bch_geometry *geo = &this->bch_geometry;
struct nand_chip *chip = &this->nand;
struct mtd_info *mtd = nand_to_mtd(chip);
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(&chip->base);
unsigned int block_mark_bit_offset;
unsigned int max_ecc;
unsigned int bbm_chunk;
unsigned int i;
/* sanity check for the minimum ECC the NAND requires */
if (!(requirements->strength > 0 &&
requirements->step_size > 0))
return -EINVAL;
geo->ecc_strength = requirements->strength;
/* check if platform can support this nand */
if (!gpmi_check_ecc(this)) {
dev_err(this->dev,
"unsupported NAND chip, minimum ecc required %d\n",
geo->ecc_strength);
return -EINVAL;
}
/* calculate the maximum ECC strength the platform can support */
geo->metadata_size = 10;
geo->gf_len = 14;
geo->ecc0_chunk_size = 1024;
geo->eccn_chunk_size = 1024;
geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size;
max_ecc = min(get_ecc_strength(this),
this->devdata->bch_max_ecc_strength);
/*
* search for a supported ECC strength that places the bad block
* marker inside a data chunk
*/
geo->ecc_strength = max_ecc;
while (!(geo->ecc_strength < requirements->strength)) {
if (bbm_in_data_chunk(this, &bbm_chunk))
goto geo_setting;
geo->ecc_strength -= 2;
}
/*
* If none of them works, keep using the minimum ECC the NAND requires,
* but change the ECC page layout.
*/
geo->ecc_strength = requirements->strength;
/* add extra ecc for meta data */
geo->ecc0_chunk_size = 0;
geo->ecc_chunk_count = (mtd->writesize / geo->eccn_chunk_size) + 1;
geo->ecc_for_meta = 1;
/* check if oob can afford this extra ecc chunk */
if (mtd->oobsize * 8 < geo->metadata_size * 8 +
geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) {
dev_err(this->dev, "unsupported NAND chip with new layout\n");
return -EINVAL;
}
/* calculate in which chunk the bbm is located */
bbm_chunk = (mtd->writesize * 8 - geo->metadata_size * 8 -
geo->gf_len * geo->ecc_strength) /
(geo->gf_len * geo->ecc_strength +
geo->eccn_chunk_size * 8) + 1;
geo_setting:
geo->page_size = mtd->writesize + geo->metadata_size +
(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
geo->payload_size = mtd->writesize;
/*
* The auxiliary buffer contains the metadata and the ECC status. The
* metadata is padded to the nearest 32-bit boundary. The ECC status
* contains one byte for every ECC chunk, and is also padded to the
* nearest 32-bit boundary.
*/
geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
+ ALIGN(geo->ecc_chunk_count, 4);
if (!this->swap_block_mark)
return 0;
/* calculate the number of ecc chunks behind the bbm */
i = (mtd->writesize / geo->eccn_chunk_size) - bbm_chunk + 1;
block_mark_bit_offset = mtd->writesize * 8 -
(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
+ geo->metadata_size * 8);
geo->block_mark_byte_offset = block_mark_bit_offset / 8;
geo->block_mark_bit_offset = block_mark_bit_offset % 8;
dev_dbg(this->dev, "BCH Geometry :\n"
"GF length : %u\n"
"ECC Strength : %u\n"
"Page Size in Bytes : %u\n"
"Metadata Size in Bytes : %u\n"
"ECC0 Chunk Size in Bytes: %u\n"
"ECCn Chunk Size in Bytes: %u\n"
"ECC Chunk Count : %u\n"
"Payload Size in Bytes : %u\n"
"Auxiliary Size in Bytes: %u\n"
"Auxiliary Status Offset: %u\n"
"Block Mark Byte Offset : %u\n"
"Block Mark Bit Offset : %u\n"
"Block Mark in chunk : %u\n"
"Ecc for Meta data : %u\n",
geo->gf_len,
geo->ecc_strength,
geo->page_size,
geo->metadata_size,
geo->ecc0_chunk_size,
geo->eccn_chunk_size,
geo->ecc_chunk_count,
geo->payload_size,
geo->auxiliary_size,
geo->auxiliary_status_offset,
geo->block_mark_byte_offset,
geo->block_mark_bit_offset,
bbm_chunk,
geo->ecc_for_meta);
return 0;
}
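/*
* Auxiliary buffer sizing example (assumed values, illustration only):
* with metadata_size = 10 and ecc_chunk_count = 9 (eight data chunks
* plus the extra metadata chunk), ALIGN(10, 4) = 12 and ALIGN(9, 4) = 12,
* so auxiliary_status_offset = 12 and auxiliary_size = 24 bytes.
*/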
static int legacy_set_geometry(struct gpmi_nand_data *this)
{
struct bch_geometry *geo = &this->bch_geometry;
struct mtd_info *mtd = nand_to_mtd(&this->nand);
unsigned int metadata_size;
unsigned int status_size;
unsigned int block_mark_bit_offset;
/*
* The size of the metadata can be changed, though we set it to 10
* bytes now. But it can't be too large, because we have to save
* enough space for BCH.
*/
geo->metadata_size = 10;
/* The default for the length of Galois Field. */
geo->gf_len = 13;
/* The default for chunk size. */
geo->ecc0_chunk_size = 512;
geo->eccn_chunk_size = 512;
while (geo->eccn_chunk_size < mtd->oobsize) {
geo->ecc0_chunk_size *= 2; /* keep C >= O */
geo->eccn_chunk_size *= 2; /* keep C >= O */
geo->gf_len = 14;
}
geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size;
/* We use the same ECC strength for all chunks. */
geo->ecc_strength = get_ecc_strength(this);
if (!gpmi_check_ecc(this)) {
dev_err(this->dev,
"ecc strength: %d cannot be supported by the controller (%d)\n"
"try to use minimum ecc strength that NAND chip required\n",
geo->ecc_strength,
this->devdata->bch_max_ecc_strength);
return -EINVAL;
}
geo->page_size = mtd->writesize + geo->metadata_size +
(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
geo->payload_size = mtd->writesize;
/*
* The auxiliary buffer contains the metadata and the ECC status. The
* metadata is padded to the nearest 32-bit boundary. The ECC status
* contains one byte for every ECC chunk, and is also padded to the
* nearest 32-bit boundary.
*/
metadata_size = ALIGN(geo->metadata_size, 4);
status_size = ALIGN(geo->ecc_chunk_count, 4);
geo->auxiliary_size = metadata_size + status_size;
geo->auxiliary_status_offset = metadata_size;
if (!this->swap_block_mark)
return 0;
/*
* We need to compute the byte and bit offsets of
* the physical block mark within the ECC-based view of the page.
*
* NAND chip with 2K page shows below:
* (Block Mark)
* | |
* | D |
* |<---->|
* V V
* +---+----------+-+----------+-+----------+-+----------+-+
* | M | data |E| data |E| data |E| data |E|
* +---+----------+-+----------+-+----------+-+----------+-+
*
* The position of block mark moves forward in the ECC-based view
* of page, and the delta is:
*
* E * G * (N - 1)
* D = (---------------- + M)
* 8
*
* With the formula to compute the ECC strength, and the condition
* : C >= O (C is the ecc chunk size)
*
* It's easy to deduce to the following result:
*
* E * G (O - M) C - M C - M
* ----------- <= ------- <= -------- < ---------
* 8 N N (N - 1)
*
* So, we get:
*
* E * G * (N - 1)
* D = (---------------- + M) < C
* 8
*
* The above inequality means the position of block mark
* within the ECC-based view of the page is still in the data chunk,
* and it's NOT in the ECC bits of the chunk.
*
* Use the following to compute the bit position of the
* physical block mark within the ECC-based view of the page:
* (page_size - D) * 8
*
* --Huang Shijie
*/
block_mark_bit_offset = mtd->writesize * 8 -
(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+ geo->metadata_size * 8);
geo->block_mark_byte_offset = block_mark_bit_offset / 8;
geo->block_mark_bit_offset = block_mark_bit_offset % 8;
return 0;
}
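/*
* Illustration with assumed values (not part of the original code): for
* a 2048-byte page with 64 bytes of OOB, the legacy path picks 512-byte
* chunks, gf_len = 13, ecc_chunk_count = 4 and an ECC strength of 8, so
* block_mark_bit_offset = 2048 * 8 - (8 * 13 * 3 + 10 * 8) = 15992,
* i.e. byte offset 1999, bit offset 0 in the ECC-based view of the page.
*/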
static int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
struct nand_chip *chip = &this->nand;
struct mtd_info *mtd = nand_to_mtd(&this->nand);
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(&chip->base);
bool use_minimum_ecc;
int err;
use_minimum_ecc = of_property_read_bool(this->dev->of_node,
"fsl,use-minimum-ecc");
/* use legacy bch geometry settings by default */
if ((!use_minimum_ecc && mtd->oobsize < 1024) ||
!(requirements->strength > 0 && requirements->step_size > 0)) {
dev_dbg(this->dev, "use legacy bch geometry\n");
err = legacy_set_geometry(this);
if (!err)
return 0;
}
/* for large oob nand */
if (mtd->oobsize > 1024) {
dev_dbg(this->dev, "use large oob bch geometry\n");
err = set_geometry_for_large_oob(this);
if (!err)
return 0;
}
/* otherwise use the minimum ECC the NAND chip requires */
dev_dbg(this->dev, "use minimum ecc bch geometry\n");
err = set_geometry_by_ecc_info(this, requirements->strength,
requirements->step_size);
if (err)
dev_err(this->dev, "none of the bch geometry setting works\n");
return err;
}
/* Configures the geometry for BCH. */
static int bch_set_geometry(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
int ret;
ret = common_nfc_set_geometry(this);
if (ret)
return ret;
ret = pm_runtime_get_sync(this->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(this->dev);
return ret;
}
/*
* Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
* chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
* and MX28.
*/
ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
if (ret)
goto err_out;
/* Set *all* chip selects to use layout 0. */
writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
ret = 0;
err_out:
pm_runtime_mark_last_busy(this->dev);
pm_runtime_put_autosuspend(this->dev);
return ret;
}
/*
* <1> First, we should know what the GPMI-clock means.
* The GPMI-clock is the internal clock of the GPMI NAND controller.
* If you set the GPMI NAND controller to 100MHz, the GPMI-clock's period
* is 10ns. We refer to the GPMI-clock's period as GPMI-clock-period.
*
* <2> Second, we should know what the frequency on the NAND chip pins is.
* The frequency on the nand chip pins is derived from the GPMI-clock.
* We can get it from the following equation:
*
* F = G / (DS + DH)
*
* F : the frequency on the nand chip pins.
* G : the GPMI clock, such as 100MHz.
* DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
* DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
*
* <3> Third, when the frequency on the NAND chip pins is above 33MHz,
* the NAND EDO (Extended Data Out) timing can be applied.
* The GPMI implements a feedback read strobe to sample the read data.
* The feedback read strobe can be delayed to support the NAND EDO timing,
* where the read strobe may deassert before the read data is valid, and
* the read data remains valid for some time after the read strobe.
*
* The following figure illustrates some aspects of a NAND Flash read:
*
* |<---tREA---->|
* | |
* | | |
* |<--tRP-->| |
* | | |
* __ ___|__________________________________
* RDN \________/ |
* |
* /---------\
* Read Data --------------< >---------
* \---------/
* | |
* |<-D->|
* FeedbackRDN ________ ____________
* \___________/
*
* D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
*
*
* <4> Now, we begin to describe how to compute the right RDN_DELAY.
*
* 4.1) From the aspect of the nand chip pins:
* Delay = (tREA + C - tRP) {1}
*
* tREA : the maximum read access time.
* C : a constant to adjust the delay. default is 4000ps.
* tRP : the read pulse width, which is exactly:
* tRP = (GPMI-clock-period) * DATA_SETUP
*
* 4.2) From the aspect of the GPMI nand controller:
* Delay = RDN_DELAY * 0.125 * RP {2}
*
* RP : the DLL reference period.
* if (GPMI-clock-period > DLL_THRESHOLD)
* RP = GPMI-clock-period / 2;
* else
* RP = GPMI-clock-period;
*
* Set HW_GPMI_CTRL1:HALF_PERIOD if the GPMI-clock-period
* is greater than DLL_THRESHOLD. On other SoCs the DLL_THRESHOLD
* is 16000ps, but on mx6q we use 12000ps.
*
* 4.3) since {1} equals {2}, we get:
*
* (tREA + 4000 - tRP) * 8
* RDN_DELAY = ----------------------- {3}
* RP
*/
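/*
* Illustrative example only (the tREA value below is assumed, not taken
* from a specific chip): with a 100MHz GPMI clock the period is 10000ps,
* which is below the 12000ps DLL threshold used on mx6, so RP = 10000ps
* and HALF_PERIOD stays clear. Assuming tREA = 16000ps and DATA_SETUP = 1
* cycle (tRP = 10000ps), {3} gives:
*
* RDN_DELAY = (16000 + 4000 - 10000) * 8 / 10000 = 8
*/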
static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
const struct nand_sdr_timings *sdr)
{
struct gpmi_nfc_hardware_timing *hw = &this->hw;
struct resources *r = &this->resources;
unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
unsigned int period_ps, reference_period_ps;
unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
unsigned int tRP_ps;
bool use_half_period;
int sample_delay_ps, sample_delay_factor;
unsigned int busy_timeout_cycles;
u8 wrn_dly_sel;
unsigned long clk_rate, min_rate;
u64 busy_timeout_ps;
if (sdr->tRC_min >= 30000) {
/* ONFI non-EDO modes [0-3] */
hw->clk_rate = 22000000;
min_rate = 0;
wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
} else if (sdr->tRC_min >= 25000) {
/* ONFI EDO mode 4 */
hw->clk_rate = 80000000;
min_rate = 22000000;
wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
} else {
/* ONFI EDO mode 5 */
hw->clk_rate = 100000000;
min_rate = 80000000;
wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
}
clk_rate = clk_round_rate(r->clock[0], hw->clk_rate);
if (clk_rate <= min_rate) {
dev_err(this->dev, "clock setting: expected %ld, got %ld\n",
hw->clk_rate, clk_rate);
return -ENOTSUPP;
}
hw->clk_rate = clk_rate;
/* SDR core timings are given in picoseconds */
period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles, 4096));
/*
* Derive NFC ideal delay from {3}:
*
* (tREA + 4000 - tRP) * 8
* RDN_DELAY = -----------------------
* RP
*/
if (period_ps > dll_threshold_ps) {
use_half_period = true;
reference_period_ps = period_ps / 2;
} else {
use_half_period = false;
reference_period_ps = period_ps;
}
tRP_ps = data_setup_cycles * period_ps;
sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
if (sample_delay_ps > 0)
sample_delay_factor = sample_delay_ps / reference_period_ps;
else
sample_delay_factor = 0;
hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
if (sample_delay_factor)
hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
BM_GPMI_CTRL1_DLL_ENABLE |
(use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
return 0;
}
static int gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
{
struct gpmi_nfc_hardware_timing *hw = &this->hw;
struct resources *r = &this->resources;
void __iomem *gpmi_regs = r->gpmi_regs;
unsigned int dll_wait_time_us;
int ret;
/* Clock dividers do NOT guarantee a clean clock signal on their outputs
* during a change of the divide factor on i.MX6Q/UL/SX. On i.MX7/8,
* all clock dividers provide this guarantee.
*/
if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this))
clk_disable_unprepare(r->clock[0]);
ret = clk_set_rate(r->clock[0], hw->clk_rate);
if (ret) {
dev_err(this->dev, "cannot set clock rate to %lu Hz: %d\n", hw->clk_rate, ret);
return ret;
}
if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) {
ret = clk_prepare_enable(r->clock[0]);
if (ret)
return ret;
}
writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
/*
* Clear several CTRL1 fields, DLL must be disabled when setting
* RDN_DELAY or HALF_PERIOD.
*/
writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);
/* Wait 64 clock cycles before using the GPMI after enabling the DLL */
dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
if (!dll_wait_time_us)
dll_wait_time_us = 1;
/* Wait for the DLL to settle. */
udelay(dll_wait_time_us);
return 0;
}
static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
const struct nand_interface_config *conf)
{
struct gpmi_nand_data *this = nand_get_controller_data(chip);
const struct nand_sdr_timings *sdr;
int ret;
/* Retrieve required NAND timings */
sdr = nand_get_sdr_timings(conf);
if (IS_ERR(sdr))
return PTR_ERR(sdr);
/* Only MX28/MX6 GPMI controller can reach EDO timings */
if (sdr->tRC_min <= 25000 && !GPMI_IS_MX28(this) && !GPMI_IS_MX6(this))
return -ENOTSUPP;
/* Stop here if this call was just a check */
if (chipnr < 0)
return 0;
/* Do the actual derivation of the controller timings */
ret = gpmi_nfc_compute_timings(this, sdr);
if (ret)
return ret;
this->hw.must_apply_timings = true;
return 0;
}
/* Clears a BCH interrupt. */
static void gpmi_clear_bch(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
}
static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
{
/* We use DMA channel 0 to access all the NAND chips. */
return this->dma_chans[0];
}
/* This will be called after the DMA operation is finished. */
static void dma_irq_callback(void *param)
{
struct gpmi_nand_data *this = param;
struct completion *dma_c = &this->dma_done;
complete(dma_c);
}
static irqreturn_t bch_irq(int irq, void *cookie)
{
struct gpmi_nand_data *this = cookie;
gpmi_clear_bch(this);
complete(&this->bch_done);
return IRQ_HANDLED;
}
static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len)
{
/*
* raw_len is the length to read/write, including BCH data, as
* passed in by exec_op. Calculate the payload data length from it.
*/
if (this->bch)
return ALIGN_DOWN(raw_len, this->bch_geometry.eccn_chunk_size);
else
return raw_len;
}
/* Can we use the upper layer's buffer directly for DMA? */
static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf,
int raw_len, struct scatterlist *sgl,
enum dma_data_direction dr)
{
int ret;
int len = gpmi_raw_len_to_len(this, raw_len);
/* first try to map the upper buffer directly */
if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
sg_init_one(sgl, buf, len);
ret = dma_map_sg(this->dev, sgl, 1, dr);
if (ret == 0)
goto map_fail;
return true;
}
map_fail:
/* We have to use our own DMA buffer. */
sg_init_one(sgl, this->data_buffer_dma, len);
if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
memcpy(this->data_buffer_dma, buf, len);
dma_map_sg(this->dev, sgl, 1, dr);
return false;
}
/* add our own bbt descriptor */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
.options = 0,
.offs = 0,
.len = 1,
.pattern = scan_ff_pattern
};
/*
* We may change the layout if we can get the ECC info from the datasheet,
* otherwise we will use the whole page + OOB area.
*/
static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *geo = &this->bch_geometry;
if (section)
return -ERANGE;
oobregion->offset = 0;
oobregion->length = geo->page_size - mtd->writesize;
return 0;
}
static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *geo = &this->bch_geometry;
if (section)
return -ERANGE;
/* The available oob size we have. */
if (geo->page_size < mtd->writesize + mtd->oobsize) {
oobregion->offset = geo->page_size - mtd->writesize;
oobregion->length = mtd->oobsize - oobregion->offset;
}
return 0;
}
static const char * const gpmi_clks_for_mx2x[] = {
"gpmi_io",
};
static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
.ecc = gpmi_ooblayout_ecc,
.free = gpmi_ooblayout_free,
};
static const struct gpmi_devdata gpmi_devdata_imx23 = {
.type = IS_MX23,
.bch_max_ecc_strength = 20,
.max_chain_delay = 16000,
.clks = gpmi_clks_for_mx2x,
.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};
static const struct gpmi_devdata gpmi_devdata_imx28 = {
.type = IS_MX28,
.bch_max_ecc_strength = 20,
.max_chain_delay = 16000,
.clks = gpmi_clks_for_mx2x,
.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};
static const char * const gpmi_clks_for_mx6[] = {
"gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
};
static const struct gpmi_devdata gpmi_devdata_imx6q = {
.type = IS_MX6Q,
.bch_max_ecc_strength = 40,
.max_chain_delay = 12000,
.clks = gpmi_clks_for_mx6,
.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};
static const struct gpmi_devdata gpmi_devdata_imx6sx = {
.type = IS_MX6SX,
.bch_max_ecc_strength = 62,
.max_chain_delay = 12000,
.clks = gpmi_clks_for_mx6,
.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};
static const char * const gpmi_clks_for_mx7d[] = {
"gpmi_io", "gpmi_bch_apb",
};
static const struct gpmi_devdata gpmi_devdata_imx7d = {
.type = IS_MX7D,
.bch_max_ecc_strength = 62,
.max_chain_delay = 12000,
.clks = gpmi_clks_for_mx7d,
.clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
};
static int acquire_register_block(struct gpmi_nand_data *this,
const char *res_name)
{
struct platform_device *pdev = this->pdev;
struct resources *res = &this->resources;
void __iomem *p;
p = devm_platform_ioremap_resource_byname(pdev, res_name);
if (IS_ERR(p))
return PTR_ERR(p);
if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
res->gpmi_regs = p;
else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
res->bch_regs = p;
else
dev_err(this->dev, "unknown resource name : %s\n", res_name);
return 0;
}
static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
struct platform_device *pdev = this->pdev;
const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
int err;
err = platform_get_irq_byname(pdev, res_name);
if (err < 0)
return err;
err = devm_request_irq(this->dev, err, irq_h, 0, res_name, this);
if (err)
dev_err(this->dev, "error requesting BCH IRQ\n");
return err;
}
static void release_dma_channels(struct gpmi_nand_data *this)
{
unsigned int i;
for (i = 0; i < DMA_CHANS; i++)
if (this->dma_chans[i]) {
dma_release_channel(this->dma_chans[i]);
this->dma_chans[i] = NULL;
}
}
static int acquire_dma_channels(struct gpmi_nand_data *this)
{
struct platform_device *pdev = this->pdev;
struct dma_chan *dma_chan;
int ret = 0;
/* request dma channel */
dma_chan = dma_request_chan(&pdev->dev, "rx-tx");
if (IS_ERR(dma_chan)) {
ret = dev_err_probe(this->dev, PTR_ERR(dma_chan),
"DMA channel request failed\n");
release_dma_channels(this);
} else {
this->dma_chans[0] = dma_chan;
}
return ret;
}
static int gpmi_get_clks(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
struct clk *clk;
int err, i;
for (i = 0; i < this->devdata->clks_count; i++) {
clk = devm_clk_get(this->dev, this->devdata->clks[i]);
if (IS_ERR(clk)) {
err = PTR_ERR(clk);
goto err_clock;
}
r->clock[i] = clk;
}
return 0;
err_clock:
dev_dbg(this->dev, "failed in finding the clocks.\n");
return err;
}
static int acquire_resources(struct gpmi_nand_data *this)
{
int ret;
ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
if (ret)
goto exit_regs;
ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
if (ret)
goto exit_regs;
ret = acquire_bch_irq(this, bch_irq);
if (ret)
goto exit_regs;
ret = acquire_dma_channels(this);
if (ret)
goto exit_regs;
ret = gpmi_get_clks(this);
if (ret)
goto exit_clock;
return 0;
exit_clock:
release_dma_channels(this);
exit_regs:
return ret;
}
static void release_resources(struct gpmi_nand_data *this)
{
release_dma_channels(this);
}
static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
{
struct device *dev = this->dev;
struct bch_geometry *geo = &this->bch_geometry;
if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
dma_free_coherent(dev, geo->auxiliary_size,
this->auxiliary_virt,
this->auxiliary_phys);
kfree(this->data_buffer_dma);
kfree(this->raw_buffer);
this->data_buffer_dma = NULL;
this->raw_buffer = NULL;
}
/* Allocate the DMA buffers */
static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
{
struct bch_geometry *geo = &this->bch_geometry;
struct device *dev = this->dev;
struct mtd_info *mtd = nand_to_mtd(&this->nand);
/*
* [2] Allocate a read/write data buffer.
* gpmi_alloc_dma_buffer can be called twice.
* We allocate a PAGE_SIZE buffer when gpmi_alloc_dma_buffer
* is called before the NAND identification, and a buffer of
* the real NAND page size when it is called afterwards.
*/
this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
GFP_DMA | GFP_KERNEL);
if (this->data_buffer_dma == NULL)
goto error_alloc;
this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
&this->auxiliary_phys, GFP_DMA);
if (!this->auxiliary_virt)
goto error_alloc;
this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
if (!this->raw_buffer)
goto error_alloc;
return 0;
error_alloc:
gpmi_free_dma_buffer(this);
return -ENOMEM;
}
/*
* Handles block mark swapping.
* It can be called to swap the block mark in or to swap it back,
* because the operation is the same in both directions.
*/
static void block_mark_swapping(struct gpmi_nand_data *this,
void *payload, void *auxiliary)
{
struct bch_geometry *nfc_geo = &this->bch_geometry;
unsigned char *p;
unsigned char *a;
unsigned int bit;
unsigned char mask;
unsigned char from_data;
unsigned char from_oob;
if (!this->swap_block_mark)
return;
/*
* If control arrives here, we're swapping. Make some convenience
* variables.
*/
bit = nfc_geo->block_mark_bit_offset;
p = payload + nfc_geo->block_mark_byte_offset;
a = auxiliary;
/*
* Get the byte from the data area that overlays the block mark. Since
* the ECC engine applies its own view to the bits in the page, the
* physical block mark won't (in general) appear on a byte boundary in
* the data.
*/
from_data = (p[0] >> bit) | (p[1] << (8 - bit));
/* Get the byte from the OOB. */
from_oob = a[0];
/* Swap them. */
a[0] = from_data;
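/*
* Splice the OOB byte back into the data area at the same bit
* position: its low (8 - bit) bits land in the top of p[0], its
* high 'bit' bits in the bottom of p[1].
*/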
mask = (0x1 << bit) - 1;
p[0] = (p[0] & mask) | (from_oob << bit);
mask = ~0 << bit;
p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
}
static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
int last, int meta)
{
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *nfc_geo = &this->bch_geometry;
struct mtd_info *mtd = nand_to_mtd(chip);
int i;
unsigned char *status;
unsigned int max_bitflips = 0;
/* Loop over status bytes, accumulating ECC status. */
status = this->auxiliary_virt + ALIGN(meta, 4);
for (i = first; i < last; i++, status++) {
if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
continue;
if (*status == STATUS_UNCORRECTABLE) {
int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
u8 *eccbuf = this->raw_buffer;
int offset, bitoffset;
int eccbytes;
int flips;
/* Read ECC bytes into our internal raw_buffer */
offset = nfc_geo->metadata_size * 8;
offset += ((8 * nfc_geo->eccn_chunk_size) + eccbits) * (i + 1);
offset -= eccbits;
bitoffset = offset % 8;
eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
offset /= 8;
eccbytes -= offset;
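/*
* Worked example with purely illustrative geometry (10 bytes of
* metadata, 512 byte chunks, gf_len = 13, ecc_strength = 14, hence
* eccbits = 182): for chunk i = 1 the math above yields
* offset = 80 + (4096 + 182) * 2 - 182 = 8454 bits, i.e. byte 1056
* with bitoffset = 6, and eccbytes = 1080 - 1056 = 24 bytes to read.
*/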
nand_change_read_column_op(chip, offset, eccbuf,
eccbytes, false);
/*
* ECC data are not byte aligned and we may have
* in-band data in the first and last byte of
* eccbuf. Set non-eccbits to one so that
* nand_check_erased_ecc_chunk() does not count them
* as bitflips.
*/
if (bitoffset)
eccbuf[0] |= GENMASK(bitoffset - 1, 0);
bitoffset = (bitoffset + eccbits) % 8;
if (bitoffset)
eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
/*
* The ECC hardware has an uncorrectable ECC status
* code in case we have bitflips in an erased page. As
* nothing was written into this subpage the ECC is
* obviously wrong and we cannot trust it. We assume
* at this point that we are reading an erased page and
* try to correct the bitflips in the buffer up to
* ecc_strength bitflips. If this is a page with random
* data, we exceed this number of bitflips and have an
* ECC failure. Otherwise we use the corrected buffer.
*/
if (i == 0) {
/* The first block includes metadata */
flips = nand_check_erased_ecc_chunk(
buf + i * nfc_geo->eccn_chunk_size,
nfc_geo->eccn_chunk_size,
eccbuf, eccbytes,
this->auxiliary_virt,
nfc_geo->metadata_size,
nfc_geo->ecc_strength);
} else {
flips = nand_check_erased_ecc_chunk(
buf + i * nfc_geo->eccn_chunk_size,
nfc_geo->eccn_chunk_size,
eccbuf, eccbytes,
NULL, 0,
nfc_geo->ecc_strength);
}
if (flips > 0) {
max_bitflips = max_t(unsigned int, max_bitflips,
flips);
mtd->ecc_stats.corrected += flips;
continue;
}
mtd->ecc_stats.failed++;
continue;
}
mtd->ecc_stats.corrected += *status;
max_bitflips = max_t(unsigned int, max_bitflips, *status);
}
return max_bitflips;
}
static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
{
struct bch_geometry *geo = &this->bch_geometry;
unsigned int ecc_strength = geo->ecc_strength >> 1;
unsigned int gf_len = geo->gf_len;
unsigned int block0_size = geo->ecc0_chunk_size;
unsigned int blockn_size = geo->eccn_chunk_size;
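/*
* Note: the FLASH0LAYOUT ECC level fields are programmed in steps of
* two, which is why ecc_strength above is geo->ecc_strength / 2.
*/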
this->bch_flashlayout0 =
BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block0_size, this);
this->bch_flashlayout1 =
BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(blockn_size, this);
}
static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
struct bch_geometry *geo = &this->bch_geometry;
unsigned int max_bitflips;
int ret;
gpmi_bch_layout_std(this);
this->bch = true;
ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
if (ret)
return ret;
max_bitflips = gpmi_count_bitflips(chip, buf, 0,
geo->ecc_chunk_count,
geo->auxiliary_status_offset);
/* handle the block mark swapping */
block_mark_swapping(this, buf, this->auxiliary_virt);
if (oob_required) {
/*
* It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
* for details about our policy for delivering the OOB.
*
* We fill the caller's buffer with set bits, and then copy the
* block mark to the caller's buffer. Note that, if block mark
* swapping was necessary, it has already been done, so we can
* rely on the first byte of the auxiliary buffer to contain
* the block mark.
*/
memset(chip->oob_poi, ~0, mtd->oobsize);
chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
}
return max_bitflips;
}
/* Fake a virtual small page for the subpage read */
static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
uint32_t len, uint8_t *buf, int page)
{
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *geo = &this->bch_geometry;
int size = chip->ecc.size; /* ECC chunk size */
int meta, n, page_size;
unsigned int max_bitflips;
unsigned int ecc_strength;
int first, last, marker_pos;
int ecc_parity_size;
int col = 0;
int ret;
/* The size of ECC parity */
ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
/* Align it with the chunk size */
first = offs / size;
last = (offs + len - 1) / size;
if (this->swap_block_mark) {
/*
* Find the chunk which contains the Block Marker.
* If this chunk is in the range of [first, last],
* we have to read out the whole page.
* Why? Because we swapped the data at the block marker position
* with the metadata, which is bound to chunk 0.
*/
marker_pos = geo->block_mark_byte_offset / size;
if (last >= marker_pos && first <= marker_pos) {
dev_dbg(this->dev,
"page:%d, first:%d, last:%d, marker at:%d\n",
page, first, last, marker_pos);
return gpmi_ecc_read_page(chip, buf, 0, page);
}
}
/*
* If there is a dedicated ECC chunk for the metadata:
* - an extra ECC size needs to be added when calculating col and
*   page_size, if the meta size is NOT zero.
* - the ecc0_chunk size needs to be set to the same size as the other
*   chunks, if the meta size is zero.
*/
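/*
* Illustrative page layouts implied by the column arithmetic below:
*
* with ecc_for_meta:    | meta | ECC | data0 | ECC | data1 | ECC | ...
* without ecc_for_meta: | meta | data0 | ECC | data1 | ECC | ...
*/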
meta = geo->metadata_size;
if (first) {
if (geo->ecc_for_meta)
col = meta + ecc_parity_size
+ (size + ecc_parity_size) * first;
else
col = meta + (size + ecc_parity_size) * first;
meta = 0;
buf = buf + first * size;
}
ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
n = last - first + 1;
if (geo->ecc_for_meta && meta)
page_size = meta + ecc_parity_size
+ (size + ecc_parity_size) * n;
else
page_size = meta + (size + ecc_parity_size) * n;
ecc_strength = geo->ecc_strength >> 1;
this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(
(geo->ecc_for_meta ? n : n - 1)) |
BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
BF_BCH_FLASH0LAYOUT0_DATA0_SIZE((geo->ecc_for_meta ?
0 : geo->ecc0_chunk_size), this);
this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->eccn_chunk_size, this);
this->bch = true;
ret = nand_read_page_op(chip, page, col, buf, page_size);
if (ret)
return ret;
dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
page, offs, len, col, first, n, page_size);
max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);
return max_bitflips;
}
static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *nfc_geo = &this->bch_geometry;
dev_dbg(this->dev, "ecc write page.\n");
gpmi_bch_layout_std(this);
this->bch = true;
memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);
if (this->swap_block_mark) {
/*
* When doing bad block marker swapping we must always copy the
* input buffer as we can't modify the const buffer.
*/
memcpy(this->data_buffer_dma, buf, mtd->writesize);
buf = this->data_buffer_dma;
block_mark_swapping(this, this->data_buffer_dma,
this->auxiliary_virt);
}
return nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
}
/*
* There are several places in this driver where we have to handle the OOB and
* block marks. This is the function where things are the most complicated, so
* this is where we try to explain it all. All the other places refer back to
* here.
*
* These are the rules, in order of decreasing importance:
*
* 1) Nothing the caller does can be allowed to imperil the block mark.
*
* 2) In read operations, the first byte of the OOB we return must reflect the
* true state of the block mark, no matter where that block mark appears in
* the physical page.
*
* 3) ECC-based read operations return an OOB full of set bits (since we never
* allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
* return).
*
* 4) "Raw" read operations return a direct view of the physical bytes in the
* page, using the conventional definition of which bytes are data and which
* are OOB. This gives the caller a way to see the actual, physical bytes
* in the page, without the distortions applied by our ECC engine.
*
*
* What we do for this specific read operation depends on two questions:
*
* 1) Are we doing a "raw" read, or an ECC-based read?
*
* 2) Are we using block mark swapping or transcription?
*
* There are four cases, illustrated by the following Karnaugh map:
*
* | Raw | ECC-based |
* -------------+-------------------------+-------------------------+
* | Read the conventional | |
* | OOB at the end of the | |
* Swapping | page and return it. It | |
* | contains exactly what | |
* | we want. | Read the block mark and |
* -------------+-------------------------+ return it in a buffer |
* | Read the conventional | full of set bits. |
* | OOB at the end of the | |
* | page and also the block | |
* Transcribing | mark in the metadata. | |
* | Copy the block mark | |
* | into the first byte of | |
* | the OOB. | |
* -------------+-------------------------+-------------------------+
*
* Note that we break rule #4 in the Transcribing/Raw case because we're not
* giving an accurate view of the actual, physical bytes in the page (we're
* overwriting the block mark). That's OK because it's more important to follow
* rule #2.
*
* It turns out that knowing whether we want an "ECC-based" or "raw" read is not
* easy. When reading a page, for example, the NAND Flash MTD code calls our
* ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
* ECC-based or raw view of the page is implicit in which function it calls
* (there is a similar pair of ECC-based/raw functions for writing).
*/
static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
int ret;
/* clear the OOB buffer */
memset(chip->oob_poi, ~0, mtd->oobsize);
/* Read out the conventional OOB. */
ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
mtd->oobsize);
if (ret)
return ret;
/*
* Now, we want to make sure the block mark is correct. In the
* non-transcribing case (!GPMI_IS_MX23()), we already have it.
* Otherwise, we need to explicitly read it.
*/
if (GPMI_IS_MX23(this)) {
/* Read the block mark into the first byte of the OOB buffer. */
ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
if (ret)
return ret;
}
return 0;
}
static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mtd_oob_region of = { };
/* Do we have available oob area? */
mtd_ooblayout_free(mtd, 0, &of);
if (!of.length)
return -EPERM;
if (!nand_is_slc(chip))
return -EPERM;
return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
chip->oob_poi + of.offset, of.length);
}
/*
* This function reads a NAND page without involving the ECC engine (no HW
* ECC correction).
* The tricky part in the GPMI/BCH controller is that it stores ECC bits
* inline (interleaved with payload DATA), and does not align data chunks
* on byte boundaries.
* We thus need to take care moving the payload data and ECC bits stored in the
* page into the provided buffers, which is why we're using nand_extract_bits().
*
* See set_geometry_by_ecc_info inline comments to have a full description
* of the layout used by the GPMI controller.
*/
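/*
* Sketch of the physical (raw) page layout handled below; the ECC fields
* are bit-packed, so chunk boundaries are generally not byte aligned:
*
* | metadata | data0 | ECC0 | data1 | ECC1 | ... | remaining OOB |
*/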
static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *nfc_geo = &this->bch_geometry;
int eccsize = nfc_geo->eccn_chunk_size;
int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
u8 *tmp_buf = this->raw_buffer;
size_t src_bit_off;
size_t oob_bit_off;
size_t oob_byte_off;
uint8_t *oob = chip->oob_poi;
int step;
int ret;
ret = nand_read_page_op(chip, page, 0, tmp_buf,
mtd->writesize + mtd->oobsize);
if (ret)
return ret;
/*
* If required, swap the bad block marker and the data stored in the
* metadata section, so that we don't wrongly consider a block as bad.
*
* See the layout description for a detailed explanation on why this
* is needed.
*/
if (this->swap_block_mark)
swap(tmp_buf[0], tmp_buf[mtd->writesize]);
/*
* Copy the metadata section into the oob buffer (this section is
* guaranteed to be aligned on a byte boundary).
*/
if (oob_required)
memcpy(oob, tmp_buf, nfc_geo->metadata_size);
oob_bit_off = nfc_geo->metadata_size * 8;
src_bit_off = oob_bit_off;
/* Extract interleaved payload data and ECC bits */
for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
if (buf)
nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
src_bit_off, eccsize * 8);
src_bit_off += eccsize * 8;
/* Align the last ECC block to a byte boundary */
if (step == nfc_geo->ecc_chunk_count - 1 &&
(oob_bit_off + eccbits) % 8)
eccbits += 8 - ((oob_bit_off + eccbits) % 8);
if (oob_required)
nand_extract_bits(oob, oob_bit_off, tmp_buf,
src_bit_off, eccbits);
src_bit_off += eccbits;
oob_bit_off += eccbits;
}
if (oob_required) {
oob_byte_off = oob_bit_off / 8;
if (oob_byte_off < mtd->oobsize)
memcpy(oob + oob_byte_off,
tmp_buf + mtd->writesize + oob_byte_off,
mtd->oobsize - oob_byte_off);
}
return 0;
}
/*
* This function writes a NAND page without involving the ECC engine (no HW
* ECC generation).
* The tricky part in the GPMI/BCH controller is that it stores ECC bits
* inline (interleaved with payload DATA), and does not align data chunks
* on byte boundaries.
* We thus need to take care moving the OOB area at the right place in the
* final page, which is why we're using nand_extract_bits().
*
* See set_geometry_by_ecc_info inline comments to have a full description
* of the layout used by the GPMI controller.
*/
static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *nfc_geo = &this->bch_geometry;
int eccsize = nfc_geo->eccn_chunk_size;
int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
u8 *tmp_buf = this->raw_buffer;
uint8_t *oob = chip->oob_poi;
size_t dst_bit_off;
size_t oob_bit_off;
size_t oob_byte_off;
int step;
/*
* Initialize all bits to 1 in case we don't have a buffer for the
* payload or oob data, so that unspecified bits of data are left
* in their initial (erased) state.
*/
if (!buf || !oob_required)
memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
/*
* First copy the metadata section (stored in oob buffer) at the
* beginning of the page, as imposed by the GPMI layout.
*/
memcpy(tmp_buf, oob, nfc_geo->metadata_size);
oob_bit_off = nfc_geo->metadata_size * 8;
dst_bit_off = oob_bit_off;
/* Interleave payload data and ECC bits */
for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
if (buf)
nand_extract_bits(tmp_buf, dst_bit_off, buf,
step * eccsize * 8, eccsize * 8);
dst_bit_off += eccsize * 8;
/* Align the last ECC block to a byte boundary */
if (step == nfc_geo->ecc_chunk_count - 1 &&
(oob_bit_off + eccbits) % 8)
eccbits += 8 - ((oob_bit_off + eccbits) % 8);
if (oob_required)
nand_extract_bits(tmp_buf, dst_bit_off, oob,
oob_bit_off, eccbits);
dst_bit_off += eccbits;
oob_bit_off += eccbits;
}
oob_byte_off = oob_bit_off / 8;
if (oob_required && oob_byte_off < mtd->oobsize)
memcpy(tmp_buf + mtd->writesize + oob_byte_off,
oob + oob_byte_off, mtd->oobsize - oob_byte_off);
/*
* If required, swap the bad block marker and the first byte of the
* metadata section, so that we don't modify the bad block marker.
*
* See the layout description for a detailed explanation on why this
* is needed.
*/
if (this->swap_block_mark)
swap(tmp_buf[0], tmp_buf[mtd->writesize]);
return nand_prog_page_op(chip, page, 0, tmp_buf,
mtd->writesize + mtd->oobsize);
}
static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
{
return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
}
static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
{
return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
}
static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
int ret = 0;
uint8_t *block_mark;
int column, page, chipnr;
chipnr = (int)(ofs >> chip->chip_shift);
nand_select_target(chip, chipnr);
column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
/* Write the block mark. */
block_mark = this->data_buffer_dma;
block_mark[0] = 0; /* bad block marker */
/* Shift to get page */
page = (int)(ofs >> chip->page_shift);
ret = nand_prog_page_op(chip, page, column, block_mark, 1);
nand_deselect_target(chip);
return ret;
}
static int nand_boot_set_geometry(struct gpmi_nand_data *this)
{
struct boot_rom_geometry *geometry = &this->rom_geometry;
/*
* Set the boot block stride size.
*
* In principle, we should be reading this from the OTP bits, since
* that's where the ROM is going to get it. In fact, we don't have any
* way to read the OTP bits, so we go with the default and hope for the
* best.
*/
geometry->stride_size_in_pages = 64;
/*
* Set the search area stride exponent.
*
* In principle, we should be reading this from the OTP bits, since
* that's where the ROM is going to get it. In fact, we don't have any
* way to read the OTP bits, so we go with the default and hope for the
* best.
*/
geometry->search_area_stride_exponent = 2;
return 0;
}
static const char *fingerprint = "STMP";
static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
{
struct boot_rom_geometry *rom_geo = &this->rom_geometry;
struct device *dev = this->dev;
struct nand_chip *chip = &this->nand;
unsigned int search_area_size_in_strides;
unsigned int stride;
unsigned int page;
u8 *buffer = nand_get_data_buf(chip);
int found_an_ncb_fingerprint = false;
int ret;
/* Compute the number of strides in a search area. */
search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
nand_select_target(chip, 0);
/*
* Loop through the first search area, looking for the NCB fingerprint.
*/
dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
for (stride = 0; stride < search_area_size_in_strides; stride++) {
/* Compute the page addresses. */
page = stride * rom_geo->stride_size_in_pages;
dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
/*
* Read the NCB fingerprint. The fingerprint is four bytes long
* and starts in the 12th byte of the page.
*/
ret = nand_read_page_op(chip, page, 12, buffer,
strlen(fingerprint));
if (ret)
continue;
/* Look for the fingerprint. */
if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
found_an_ncb_fingerprint = true;
break;
}
}
nand_deselect_target(chip);
if (found_an_ncb_fingerprint)
dev_dbg(dev, "\tFound a fingerprint\n");
else
dev_dbg(dev, "\tNo fingerprint found\n");
return found_an_ncb_fingerprint;
}
/* Writes a transcription stamp. */
static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
{
struct device *dev = this->dev;
struct boot_rom_geometry *rom_geo = &this->rom_geometry;
struct nand_chip *chip = &this->nand;
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int block_size_in_pages;
unsigned int search_area_size_in_strides;
unsigned int search_area_size_in_pages;
unsigned int search_area_size_in_blocks;
unsigned int block;
unsigned int stride;
unsigned int page;
u8 *buffer = nand_get_data_buf(chip);
int status;
/* Compute the search area geometry. */
block_size_in_pages = mtd->erasesize / mtd->writesize;
search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
search_area_size_in_pages = search_area_size_in_strides *
rom_geo->stride_size_in_pages;
search_area_size_in_blocks =
(search_area_size_in_pages + (block_size_in_pages - 1)) /
block_size_in_pages;
dev_dbg(dev, "Search Area Geometry :\n");
dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages);
nand_select_target(chip, 0);
/* Loop over blocks in the first search area, erasing them. */
dev_dbg(dev, "Erasing the search area...\n");
for (block = 0; block < search_area_size_in_blocks; block++) {
/* Erase this block. */
dev_dbg(dev, "\tErasing block 0x%x\n", block);
status = nand_erase_op(chip, block);
if (status)
dev_err(dev, "[%s] Erase failed.\n", __func__);
}
/* Write the NCB fingerprint into the page buffer. */
memset(buffer, ~0, mtd->writesize);
memcpy(buffer + 12, fingerprint, strlen(fingerprint));
/* Loop through the first search area, writing NCB fingerprints. */
dev_dbg(dev, "Writing NCB fingerprints...\n");
for (stride = 0; stride < search_area_size_in_strides; stride++) {
/* Compute the page addresses. */
page = stride * rom_geo->stride_size_in_pages;
/* Write the first page of the current stride. */
dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
status = chip->ecc.write_page_raw(chip, buffer, 0, page);
if (status)
dev_err(dev, "[%s] Write failed.\n", __func__);
}
nand_deselect_target(chip);
return 0;
}
static int mx23_boot_init(struct gpmi_nand_data *this)
{
struct device *dev = this->dev;
struct nand_chip *chip = &this->nand;
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int block_count;
unsigned int block;
int chipnr;
int page;
loff_t byte;
uint8_t block_mark;
int ret = 0;
/*
* If control arrives here, we can't use block mark swapping, which
* means we're forced to use transcription. First, scan for the
* transcription stamp. If we find it, then we don't have to do
* anything -- the block marks are already transcribed.
*/
if (mx23_check_transcription_stamp(this))
return 0;
/*
* If control arrives here, we couldn't find a transcription stamp, so
* we presume the block marks are in the conventional location.
*/
dev_dbg(dev, "Transcribing bad block marks...\n");
/* Compute the number of blocks in the entire medium. */
block_count = nanddev_eraseblocks_per_target(&chip->base);
/*
* Loop over all the blocks in the medium, transcribing block marks as
* we go.
*/
for (block = 0; block < block_count; block++) {
/*
* Compute the chip, page and byte addresses for this block's
* conventional mark.
*/
chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
page = block << (chip->phys_erase_shift - chip->page_shift);
byte = block << chip->phys_erase_shift;
/* Send the command to read the conventional block mark. */
nand_select_target(chip, chipnr);
ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
1);
nand_deselect_target(chip);
if (ret)
continue;
/*
* Check if the block is marked bad. If so, we need to mark it
* again, but this time the result will be a mark in the
* location where we transcribe block marks.
*/
if (block_mark != 0xff) {
dev_dbg(dev, "Transcribing mark in block %u\n", block);
ret = chip->legacy.block_markbad(chip, byte);
if (ret)
dev_err(dev,
"Failed to mark block bad with ret %d\n",
ret);
}
}
/* Write the stamp that indicates we've transcribed the block marks. */
mx23_write_transcription_stamp(this);
return 0;
}
static int nand_boot_init(struct gpmi_nand_data *this)
{
nand_boot_set_geometry(this);
/* This is ROM arch-specific initialization before the BBT scanning. */
if (GPMI_IS_MX23(this))
return mx23_boot_init(this);
return 0;
}
static int gpmi_set_geometry(struct gpmi_nand_data *this)
{
int ret;
/* Free the temporary DMA memory for reading ID. */
gpmi_free_dma_buffer(this);
/* Set up the NFC geometry which is used by BCH. */
ret = bch_set_geometry(this);
if (ret) {
dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
return ret;
}
/* Alloc the new DMA buffers according to the pagesize and oobsize */
return gpmi_alloc_dma_buffer(this);
}
static int gpmi_init_last(struct gpmi_nand_data *this)
{
struct nand_chip *chip = &this->nand;
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
struct bch_geometry *bch_geo = &this->bch_geometry;
int ret;
/* Set up the medium geometry */
ret = gpmi_set_geometry(this);
if (ret)
return ret;
/* Init the nand_ecc_ctrl{} */
ecc->read_page = gpmi_ecc_read_page;
ecc->write_page = gpmi_ecc_write_page;
ecc->read_oob = gpmi_ecc_read_oob;
ecc->write_oob = gpmi_ecc_write_oob;
ecc->read_page_raw = gpmi_ecc_read_page_raw;
ecc->write_page_raw = gpmi_ecc_write_page_raw;
ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
ecc->size = bch_geo->eccn_chunk_size;
ecc->strength = bch_geo->ecc_strength;
mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
/*
* We only enable the subpage read when:
* (1) the chip is imx6, and
* (2) the size of the ECC parity is byte aligned.
*/
if (GPMI_IS_MX6(this) &&
((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
ecc->read_subpage = gpmi_ecc_read_subpage;
chip->options |= NAND_SUBPAGE_READ;
}
return 0;
}
static int gpmi_nand_attach_chip(struct nand_chip *chip)
{
struct gpmi_nand_data *this = nand_get_controller_data(chip);
int ret;
if (chip->bbt_options & NAND_BBT_USE_FLASH) {
chip->bbt_options |= NAND_BBT_NO_OOB;
if (of_property_read_bool(this->dev->of_node,
"fsl,no-blockmark-swap"))
this->swap_block_mark = false;
}
dev_dbg(this->dev, "Blockmark swapping %sabled\n",
this->swap_block_mark ? "en" : "dis");
ret = gpmi_init_last(this);
if (ret)
return ret;
chip->options |= NAND_SKIP_BBTSCAN;
return 0;
}
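/* Grab the next transfer slot, or return NULL once the table is exhausted. */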
static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this)
{
struct gpmi_transfer *transfer = &this->transfers[this->ntransfers];
this->ntransfers++;
if (this->ntransfers == GPMI_MAX_TRANSFERS)
return NULL;
return transfer;
}
static struct dma_async_tx_descriptor *gpmi_chain_command(
struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr)
{
struct dma_chan *channel = get_dma_chan(this);
struct dma_async_tx_descriptor *desc;
struct gpmi_transfer *transfer;
int chip = this->nand.cur_cs;
u32 pio[3];
/* [1] send out the PIO words */
pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
| BM_GPMI_CTRL0_WORD_LENGTH
| BF_GPMI_CTRL0_CS(chip, this)
| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
| BM_GPMI_CTRL0_ADDRESS_INCREMENT
| BF_GPMI_CTRL0_XFER_COUNT(naddr + 1);
pio[1] = 0;
pio[2] = 0;
desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
DMA_TRANS_NONE, 0);
if (!desc)
return NULL;
transfer = get_next_transfer(this);
if (!transfer)
return NULL;
transfer->cmdbuf[0] = cmd;
if (naddr)
memcpy(&transfer->cmdbuf[1], addr, naddr);
sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);
transfer->direction = DMA_TO_DEVICE;
desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV,
MXS_DMA_CTRL_WAIT4END);
return desc;
}
static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
struct gpmi_nand_data *this)
{
struct dma_chan *channel = get_dma_chan(this);
u32 pio[2];
pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
| BM_GPMI_CTRL0_WORD_LENGTH
| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
| BF_GPMI_CTRL0_XFER_COUNT(0);
pio[1] = 0;
return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE,
MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
}
static struct dma_async_tx_descriptor *gpmi_chain_data_read(
struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
{
struct dma_async_tx_descriptor *desc;
struct dma_chan *channel = get_dma_chan(this);
struct gpmi_transfer *transfer;
u32 pio[6] = {};
transfer = get_next_transfer(this);
if (!transfer)
return NULL;
transfer->direction = DMA_FROM_DEVICE;
*direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl,
DMA_FROM_DEVICE);
pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
| BM_GPMI_CTRL0_WORD_LENGTH
| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
| BF_GPMI_CTRL0_XFER_COUNT(raw_len);
if (this->bch) {
pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
| BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
| BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
pio[3] = raw_len;
pio[4] = transfer->sgl.dma_address;
pio[5] = this->auxiliary_phys;
}
desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
DMA_TRANS_NONE, 0);
if (!desc)
return NULL;
if (!this->bch)
desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
DMA_DEV_TO_MEM,
MXS_DMA_CTRL_WAIT4END);
return desc;
}
static struct dma_async_tx_descriptor *gpmi_chain_data_write(
struct gpmi_nand_data *this, const void *buf, int raw_len)
{
struct dma_chan *channel = get_dma_chan(this);
struct dma_async_tx_descriptor *desc;
struct gpmi_transfer *transfer;
u32 pio[6] = {};
transfer = get_next_transfer(this);
if (!transfer)
return NULL;
transfer->direction = DMA_TO_DEVICE;
prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE);
pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
| BM_GPMI_CTRL0_WORD_LENGTH
| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
| BF_GPMI_CTRL0_XFER_COUNT(raw_len);
if (this->bch) {
pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
| BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
| BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
pio[3] = raw_len;
pio[4] = transfer->sgl.dma_address;
pio[5] = this->auxiliary_phys;
}
desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
DMA_TRANS_NONE,
(this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
if (!desc)
return NULL;
if (!this->bch)
desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
DMA_MEM_TO_DEV,
MXS_DMA_CTRL_WAIT4END);
return desc;
}
static int gpmi_nfc_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
const struct nand_op_instr *instr;
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct dma_async_tx_descriptor *desc = NULL;
int i, ret, buf_len = 0, nbufs = 0;
u8 cmd = 0;
void *buf_read = NULL;
const void *buf_write = NULL;
bool direct = false;
struct completion *dma_completion, *bch_completion;
unsigned long to;
if (check_only)
return 0;
this->ntransfers = 0;
for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
this->transfers[i].direction = DMA_NONE;
ret = pm_runtime_resume_and_get(this->dev);
if (ret < 0)
return ret;
/*
* This driver currently supports only one NAND chip. Plus, dies share
* the same configuration. So once timings have been applied on the
* controller side, they will not change anymore. When that time
* comes, the check on must_apply_timings will have to be dropped.
*/
if (this->hw.must_apply_timings) {
this->hw.must_apply_timings = false;
ret = gpmi_nfc_apply_timings(this);
if (ret)
goto out_pm;
}
dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
for (i = 0; i < op->ninstrs; i++) {
instr = &op->instrs[i];
nand_op_trace(" ", instr);
switch (instr->type) {
case NAND_OP_WAITRDY_INSTR:
desc = gpmi_chain_wait_ready(this);
break;
case NAND_OP_CMD_INSTR:
cmd = instr->ctx.cmd.opcode;
/*
* When this command has an address cycle, chain it
* together with the address cycle.
*/
if (i + 1 != op->ninstrs &&
op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
continue;
desc = gpmi_chain_command(this, cmd, NULL, 0);
break;
case NAND_OP_ADDR_INSTR:
desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs,
instr->ctx.addr.naddrs);
break;
case NAND_OP_DATA_OUT_INSTR:
buf_write = instr->ctx.data.buf.out;
buf_len = instr->ctx.data.len;
nbufs++;
desc = gpmi_chain_data_write(this, buf_write, buf_len);
break;
case NAND_OP_DATA_IN_INSTR:
if (!instr->ctx.data.len)
break;
buf_read = instr->ctx.data.buf.in;
buf_len = instr->ctx.data.len;
nbufs++;
desc = gpmi_chain_data_read(this, buf_read, buf_len,
&direct);
break;
}
if (!desc) {
ret = -ENXIO;
goto unmap;
}
}
dev_dbg(this->dev, "%s setup done\n", __func__);
if (nbufs > 1) {
dev_err(this->dev, "Multiple data instructions not supported\n");
ret = -EINVAL;
goto unmap;
}
if (this->bch) {
writel(this->bch_flashlayout0,
this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
writel(this->bch_flashlayout1,
this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
}
desc->callback = dma_irq_callback;
desc->callback_param = this;
dma_completion = &this->dma_done;
bch_completion = NULL;
init_completion(dma_completion);
if (this->bch && buf_read) {
writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
this->resources.bch_regs + HW_BCH_CTRL_SET);
bch_completion = &this->bch_done;
init_completion(bch_completion);
}
dmaengine_submit(desc);
dma_async_issue_pending(get_dma_chan(this));
to = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
if (!to) {
dev_err(this->dev, "DMA timeout, last DMA\n");
gpmi_dump_info(this);
ret = -ETIMEDOUT;
goto unmap;
}
if (this->bch && buf_read) {
to = wait_for_completion_timeout(bch_completion, msecs_to_jiffies(1000));
if (!to) {
dev_err(this->dev, "BCH timeout, last DMA\n");
gpmi_dump_info(this);
ret = -ETIMEDOUT;
goto unmap;
}
}
writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
this->resources.bch_regs + HW_BCH_CTRL_CLR);
gpmi_clear_bch(this);
ret = 0;
unmap:
for (i = 0; i < this->ntransfers; i++) {
struct gpmi_transfer *transfer = &this->transfers[i];
if (transfer->direction != DMA_NONE)
dma_unmap_sg(this->dev, &transfer->sgl, 1,
transfer->direction);
}
if (!ret && buf_read && !direct)
memcpy(buf_read, this->data_buffer_dma,
gpmi_raw_len_to_len(this, buf_len));
this->bch = false;
out_pm:
pm_runtime_mark_last_busy(this->dev);
pm_runtime_put_autosuspend(this->dev);
return ret;
}
static const struct nand_controller_ops gpmi_nand_controller_ops = {
.attach_chip = gpmi_nand_attach_chip,
.setup_interface = gpmi_setup_interface,
.exec_op = gpmi_nfc_exec_op,
};
static int gpmi_nand_init(struct gpmi_nand_data *this)
{
struct nand_chip *chip = &this->nand;
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
/* init the MTD data structures */
mtd->name = "gpmi-nand";
mtd->dev.parent = this->dev;
/* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
nand_set_controller_data(chip, this);
nand_set_flash_node(chip, this->pdev->dev.of_node);
chip->legacy.block_markbad = gpmi_block_markbad;
chip->badblock_pattern = &gpmi_bbt_descr;
chip->options |= NAND_NO_SUBPAGE_WRITE;
/* Set up swap_block_mark; it must be set before gpmi_set_geometry(). */
this->swap_block_mark = !GPMI_IS_MX23(this);
/*
* Allocate a temporary DMA buffer for reading ID in the
* nand_scan_ident().
*/
this->bch_geometry.payload_size = 1024;
this->bch_geometry.auxiliary_size = 128;
ret = gpmi_alloc_dma_buffer(this);
if (ret)
return ret;
nand_controller_init(&this->base);
this->base.ops = &gpmi_nand_controller_ops;
chip->controller = &this->base;
ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
if (ret)
goto err_out;
ret = nand_boot_init(this);
if (ret)
goto err_nand_cleanup;
ret = nand_create_bbt(chip);
if (ret)
goto err_nand_cleanup;
ret = mtd_device_register(mtd, NULL, 0);
if (ret)
goto err_nand_cleanup;
return 0;
err_nand_cleanup:
nand_cleanup(chip);
err_out:
gpmi_free_dma_buffer(this);
return ret;
}
static const struct of_device_id gpmi_nand_id_table[] = {
{ .compatible = "fsl,imx23-gpmi-nand", .data = &gpmi_devdata_imx23, },
{ .compatible = "fsl,imx28-gpmi-nand", .data = &gpmi_devdata_imx28, },
{ .compatible = "fsl,imx6q-gpmi-nand", .data = &gpmi_devdata_imx6q, },
{ .compatible = "fsl,imx6sx-gpmi-nand", .data = &gpmi_devdata_imx6sx, },
{ .compatible = "fsl,imx7d-gpmi-nand", .data = &gpmi_devdata_imx7d,},
{}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
static int gpmi_nand_probe(struct platform_device *pdev)
{
struct gpmi_nand_data *this;
int ret;
this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
if (!this)
return -ENOMEM;
this->devdata = of_device_get_match_data(&pdev->dev);
platform_set_drvdata(pdev, this);
this->pdev = pdev;
this->dev = &pdev->dev;
ret = acquire_resources(this);
if (ret)
goto exit_acquire_resources;
ret = __gpmi_enable_clk(this, true);
if (ret)
goto exit_acquire_resources;
pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
ret = gpmi_init(this);
if (ret)
goto exit_nfc_init;
ret = gpmi_nand_init(this);
if (ret)
goto exit_nfc_init;
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
dev_info(this->dev, "driver registered.\n");
return 0;
exit_nfc_init:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
release_resources(this);
exit_acquire_resources:
return ret;
}
static void gpmi_nand_remove(struct platform_device *pdev)
{
struct gpmi_nand_data *this = platform_get_drvdata(pdev);
struct nand_chip *chip = &this->nand;
int ret;
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
gpmi_free_dma_buffer(this);
release_resources(this);
}
#ifdef CONFIG_PM_SLEEP
static int gpmi_pm_suspend(struct device *dev)
{
struct gpmi_nand_data *this = dev_get_drvdata(dev);
release_dma_channels(this);
return 0;
}
static int gpmi_pm_resume(struct device *dev)
{
struct gpmi_nand_data *this = dev_get_drvdata(dev);
int ret;
ret = acquire_dma_channels(this);
if (ret < 0)
return ret;
/* re-init the GPMI registers */
ret = gpmi_init(this);
if (ret) {
dev_err(this->dev, "Error setting GPMI : %d\n", ret);
return ret;
}
/* Set flag to get timing setup restored for next exec_op */
if (this->hw.clk_rate)
this->hw.must_apply_timings = true;
/* re-init the BCH registers */
ret = bch_set_geometry(this);
if (ret) {
dev_err(this->dev, "Error setting BCH : %d\n", ret);
return ret;
}
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static int __maybe_unused gpmi_runtime_suspend(struct device *dev)
{
struct gpmi_nand_data *this = dev_get_drvdata(dev);
return __gpmi_enable_clk(this, false);
}
static int __maybe_unused gpmi_runtime_resume(struct device *dev)
{
struct gpmi_nand_data *this = dev_get_drvdata(dev);
return __gpmi_enable_clk(this, true);
}
static const struct dev_pm_ops gpmi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
};
static struct platform_driver gpmi_nand_driver = {
.driver = {
.name = "gpmi-nand",
.pm = &gpmi_pm_ops,
.of_match_table = gpmi_nand_id_table,
},
.probe = gpmi_nand_probe,
.remove_new = gpmi_nand_remove,
};
module_platform_driver(gpmi_nand_driver);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2017 ATMEL
* Copyright 2017 Free Electrons
*
* Author: Boris Brezillon <[email protected]>
*
* Derived from the atmel_nand.c driver which contained the following
* copyrights:
*
* Copyright 2003 Rick Bronson
*
* Derived from drivers/mtd/nand/autcpu12.c (removed in v3.8)
* Copyright 2001 Thomas Gleixner ([email protected])
*
* Derived from drivers/mtd/spia.c (removed in v3.8)
* Copyright 2000 Steven J. Hill ([email protected])
*
*
* Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
* Richard Genoud ([email protected]), Adeneo Copyright 2007
*
* Derived from Das U-Boot source code
* (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
* Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
*
* Add Programmable Multibit ECC support for various AT91 SoC
* Copyright 2012 ATMEL, Hong Xu
*
* Add Nand Flash Controller support for SAMA5 SoC
* Copyright 2013 ATMEL, Josh Wu ([email protected])
*
* A few words about the naming convention in this file. This convention
* applies to structure and function names.
*
* Prefixes:
*
* - atmel_nand_: all generic structures/functions
* - atmel_smc_nand_: all structures/functions specific to the SMC interface
* (at91sam9 and avr32 SoCs)
* - atmel_hsmc_nand_: all structures/functions specific to the HSMC interface
* (sama5 SoCs and later)
* - atmel_nfc_: all structures/functions used to manipulate the NFC sub-block
* that is available in the HSMC block
* - <soc>_nand_: all SoC specific structures/functions
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/genalloc.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/atmel-matrix.h>
#include <linux/mfd/syscon/atmel-smc.h>
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <soc/at91/atmel-sfr.h>
#include "pmecc.h"
#define ATMEL_HSMC_NFC_CFG 0x0
#define ATMEL_HSMC_NFC_CFG_SPARESIZE(x) (((x) / 4) << 24)
#define ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK GENMASK(30, 24)
#define ATMEL_HSMC_NFC_CFG_DTO(cyc, mul) (((cyc) << 16) | ((mul) << 20))
#define ATMEL_HSMC_NFC_CFG_DTO_MAX GENMASK(22, 16)
#define ATMEL_HSMC_NFC_CFG_RBEDGE BIT(13)
#define ATMEL_HSMC_NFC_CFG_FALLING_EDGE BIT(12)
#define ATMEL_HSMC_NFC_CFG_RSPARE BIT(9)
#define ATMEL_HSMC_NFC_CFG_WSPARE BIT(8)
#define ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK GENMASK(2, 0)
#define ATMEL_HSMC_NFC_CFG_PAGESIZE(x) (fls((x) / 512) - 1)
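/*
 * Illustrative encoding example (values assumed, not taken from a specific
 * datasheet): a 2048-byte page gives PAGESIZE = fls(2048 / 512) - 1 =
 * fls(4) - 1 = 2, and a 64-byte OOB area gives SPARESIZE = (64 / 4) << 24.
 */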
#define ATMEL_HSMC_NFC_CTRL 0x4
#define ATMEL_HSMC_NFC_CTRL_EN BIT(0)
#define ATMEL_HSMC_NFC_CTRL_DIS BIT(1)
#define ATMEL_HSMC_NFC_SR 0x8
#define ATMEL_HSMC_NFC_IER 0xc
#define ATMEL_HSMC_NFC_IDR 0x10
#define ATMEL_HSMC_NFC_IMR 0x14
#define ATMEL_HSMC_NFC_SR_ENABLED BIT(1)
#define ATMEL_HSMC_NFC_SR_RB_RISE BIT(4)
#define ATMEL_HSMC_NFC_SR_RB_FALL BIT(5)
#define ATMEL_HSMC_NFC_SR_BUSY BIT(8)
#define ATMEL_HSMC_NFC_SR_WR BIT(11)
#define ATMEL_HSMC_NFC_SR_CSID GENMASK(14, 12)
#define ATMEL_HSMC_NFC_SR_XFRDONE BIT(16)
#define ATMEL_HSMC_NFC_SR_CMDDONE BIT(17)
#define ATMEL_HSMC_NFC_SR_DTOE BIT(20)
#define ATMEL_HSMC_NFC_SR_UNDEF BIT(21)
#define ATMEL_HSMC_NFC_SR_AWB BIT(22)
#define ATMEL_HSMC_NFC_SR_NFCASE BIT(23)
#define ATMEL_HSMC_NFC_SR_ERRORS (ATMEL_HSMC_NFC_SR_DTOE | \
ATMEL_HSMC_NFC_SR_UNDEF | \
ATMEL_HSMC_NFC_SR_AWB | \
ATMEL_HSMC_NFC_SR_NFCASE)
#define ATMEL_HSMC_NFC_SR_RBEDGE(x) BIT((x) + 24)
#define ATMEL_HSMC_NFC_ADDR 0x18
#define ATMEL_HSMC_NFC_BANK 0x1c
#define ATMEL_NFC_MAX_RB_ID 7
#define ATMEL_NFC_SRAM_SIZE 0x2400
#define ATMEL_NFC_CMD(pos, cmd) ((cmd) << (((pos) * 8) + 2))
#define ATMEL_NFC_VCMD2 BIT(18)
#define ATMEL_NFC_ACYCLE(naddrs) ((naddrs) << 19)
#define ATMEL_NFC_CSID(cs) ((cs) << 22)
#define ATMEL_NFC_DATAEN BIT(25)
#define ATMEL_NFC_NFCWR BIT(26)
#define ATMEL_NFC_MAX_ADDR_CYCLES 5
#define ATMEL_NAND_ALE_OFFSET BIT(21)
#define ATMEL_NAND_CLE_OFFSET BIT(22)
#define DEFAULT_TIMEOUT_MS 1000
#define MIN_DMA_LEN 128
static bool atmel_nand_avoid_dma __read_mostly;
MODULE_PARM_DESC(avoiddma, "Avoid using DMA");
module_param_named(avoiddma, atmel_nand_avoid_dma, bool, 0400);
enum atmel_nand_rb_type {
ATMEL_NAND_NO_RB,
ATMEL_NAND_NATIVE_RB,
ATMEL_NAND_GPIO_RB,
};
struct atmel_nand_rb {
enum atmel_nand_rb_type type;
union {
struct gpio_desc *gpio;
int id;
};
};
struct atmel_nand_cs {
int id;
struct atmel_nand_rb rb;
struct gpio_desc *csgpio;
struct {
void __iomem *virt;
dma_addr_t dma;
} io;
struct atmel_smc_cs_conf smcconf;
};
struct atmel_nand {
struct list_head node;
struct device *dev;
struct nand_chip base;
struct atmel_nand_cs *activecs;
struct atmel_pmecc_user *pmecc;
struct gpio_desc *cdgpio;
int numcs;
struct atmel_nand_cs cs[];
};
static inline struct atmel_nand *to_atmel_nand(struct nand_chip *chip)
{
return container_of(chip, struct atmel_nand, base);
}
enum atmel_nfc_data_xfer {
ATMEL_NFC_NO_DATA,
ATMEL_NFC_READ_DATA,
ATMEL_NFC_WRITE_DATA,
};
struct atmel_nfc_op {
u8 cs;
u8 ncmds;
u8 cmds[2];
u8 naddrs;
u8 addrs[5];
enum atmel_nfc_data_xfer data;
u32 wait;
u32 errors;
};
struct atmel_nand_controller;
struct atmel_nand_controller_caps;
struct atmel_nand_controller_ops {
int (*probe)(struct platform_device *pdev,
const struct atmel_nand_controller_caps *caps);
int (*remove)(struct atmel_nand_controller *nc);
void (*nand_init)(struct atmel_nand_controller *nc,
struct atmel_nand *nand);
int (*ecc_init)(struct nand_chip *chip);
int (*setup_interface)(struct atmel_nand *nand, int csline,
const struct nand_interface_config *conf);
int (*exec_op)(struct atmel_nand *nand,
const struct nand_operation *op, bool check_only);
};
struct atmel_nand_controller_caps {
bool has_dma;
bool legacy_of_bindings;
u32 ale_offs;
u32 cle_offs;
const char *ebi_csa_regmap_name;
const struct atmel_nand_controller_ops *ops;
};
struct atmel_nand_controller {
struct nand_controller base;
const struct atmel_nand_controller_caps *caps;
struct device *dev;
struct regmap *smc;
struct dma_chan *dmac;
struct atmel_pmecc *pmecc;
struct list_head chips;
struct clk *mck;
};
static inline struct atmel_nand_controller *
to_nand_controller(struct nand_controller *ctl)
{
return container_of(ctl, struct atmel_nand_controller, base);
}
struct atmel_smc_nand_ebi_csa_cfg {
u32 offs;
u32 nfd0_on_d16;
};
struct atmel_smc_nand_controller {
struct atmel_nand_controller base;
struct regmap *ebi_csa_regmap;
struct atmel_smc_nand_ebi_csa_cfg *ebi_csa;
};
static inline struct atmel_smc_nand_controller *
to_smc_nand_controller(struct nand_controller *ctl)
{
return container_of(to_nand_controller(ctl),
struct atmel_smc_nand_controller, base);
}
struct atmel_hsmc_nand_controller {
struct atmel_nand_controller base;
struct {
struct gen_pool *pool;
void __iomem *virt;
dma_addr_t dma;
} sram;
const struct atmel_hsmc_reg_layout *hsmc_layout;
struct regmap *io;
struct atmel_nfc_op op;
struct completion complete;
u32 cfg;
int irq;
/* Only used when instantiating from legacy DT bindings. */
struct clk *clk;
};
static inline struct atmel_hsmc_nand_controller *
to_hsmc_nand_controller(struct nand_controller *ctl)
{
return container_of(to_nand_controller(ctl),
struct atmel_hsmc_nand_controller, base);
}
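/*
 * op->wait holds the status bits we are still waiting for. XOR-ing out
 * "status & op->wait" clears the bits that have now been observed, so the
 * operation is considered done once every awaited bit has been seen or any
 * error bit has been raised.
 */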
static bool atmel_nfc_op_done(struct atmel_nfc_op *op, u32 status)
{
op->errors |= status & ATMEL_HSMC_NFC_SR_ERRORS;
op->wait ^= status & op->wait;
return !op->wait || op->errors;
}
static irqreturn_t atmel_nfc_interrupt(int irq, void *data)
{
struct atmel_hsmc_nand_controller *nc = data;
u32 sr, rcvd;
bool done;
regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &sr);
rcvd = sr & (nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
done = atmel_nfc_op_done(&nc->op, sr);
if (rcvd)
regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, rcvd);
if (done)
complete(&nc->complete);
return rcvd ? IRQ_HANDLED : IRQ_NONE;
}
static int atmel_nfc_wait(struct atmel_hsmc_nand_controller *nc, bool poll,
unsigned int timeout_ms)
{
int ret;
if (!timeout_ms)
timeout_ms = DEFAULT_TIMEOUT_MS;
if (poll) {
u32 status;
ret = regmap_read_poll_timeout(nc->base.smc,
ATMEL_HSMC_NFC_SR, status,
atmel_nfc_op_done(&nc->op,
status),
0, timeout_ms * 1000);
} else {
init_completion(&nc->complete);
regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IER,
nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
ret = wait_for_completion_timeout(&nc->complete,
msecs_to_jiffies(timeout_ms));
if (!ret)
ret = -ETIMEDOUT;
else
ret = 0;
regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
}
if (nc->op.errors & ATMEL_HSMC_NFC_SR_DTOE) {
dev_err(nc->base.dev, "Waiting NAND R/B Timeout\n");
ret = -ETIMEDOUT;
}
if (nc->op.errors & ATMEL_HSMC_NFC_SR_UNDEF) {
dev_err(nc->base.dev, "Access to an undefined area\n");
ret = -EIO;
}
if (nc->op.errors & ATMEL_HSMC_NFC_SR_AWB) {
dev_err(nc->base.dev, "Access while busy\n");
ret = -EIO;
}
if (nc->op.errors & ATMEL_HSMC_NFC_SR_NFCASE) {
dev_err(nc->base.dev, "Wrong access size\n");
ret = -EIO;
}
return ret;
}
static void atmel_nand_dma_transfer_finished(void *data)
{
struct completion *finished = data;
complete(finished);
}
static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
void *buf, dma_addr_t dev_dma, size_t len,
enum dma_data_direction dir)
{
DECLARE_COMPLETION_ONSTACK(finished);
dma_addr_t src_dma, dst_dma, buf_dma;
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
buf_dma = dma_map_single(nc->dev, buf, len, dir);
if (dma_mapping_error(nc->dev, buf_dma)) {
dev_err(nc->dev,
"Failed to prepare a buffer for DMA access\n");
goto err;
}
if (dir == DMA_FROM_DEVICE) {
src_dma = dev_dma;
dst_dma = buf_dma;
} else {
src_dma = buf_dma;
dst_dma = dev_dma;
}
tx = dmaengine_prep_dma_memcpy(nc->dmac, dst_dma, src_dma, len,
DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(nc->dev, "Failed to prepare DMA memcpy\n");
goto err_unmap;
}
tx->callback = atmel_nand_dma_transfer_finished;
tx->callback_param = &finished;
cookie = dmaengine_submit(tx);
if (dma_submit_error(cookie)) {
dev_err(nc->dev, "Failed to do DMA tx_submit\n");
goto err_unmap;
}
dma_async_issue_pending(nc->dmac);
wait_for_completion(&finished);
dma_unmap_single(nc->dev, buf_dma, len, dir);
return 0;
err_unmap:
dma_unmap_single(nc->dev, buf_dma, len, dir);
err:
dev_dbg(nc->dev, "Fall back to CPU I/O\n");
return -EIO;
}
static int atmel_nfc_exec_op(struct atmel_hsmc_nand_controller *nc, bool poll)
{
u8 *addrs = nc->op.addrs;
unsigned int op = 0;
u32 addr, val;
int i, ret;
nc->op.wait = ATMEL_HSMC_NFC_SR_CMDDONE;
for (i = 0; i < nc->op.ncmds; i++)
op |= ATMEL_NFC_CMD(i, nc->op.cmds[i]);
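/*
 * Note on the packing below: when all 5 address cycles are used, the first
 * queued address byte is sent through the dedicated NFC_ADDR register and
 * the remaining four bytes are packed into the 32-bit value written along
 * with the command word.
 */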
if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
regmap_write(nc->base.smc, ATMEL_HSMC_NFC_ADDR, *addrs++);
op |= ATMEL_NFC_CSID(nc->op.cs) |
ATMEL_NFC_ACYCLE(nc->op.naddrs);
if (nc->op.ncmds > 1)
op |= ATMEL_NFC_VCMD2;
addr = addrs[0] | (addrs[1] << 8) | (addrs[2] << 16) |
(addrs[3] << 24);
if (nc->op.data != ATMEL_NFC_NO_DATA) {
op |= ATMEL_NFC_DATAEN;
nc->op.wait |= ATMEL_HSMC_NFC_SR_XFRDONE;
if (nc->op.data == ATMEL_NFC_WRITE_DATA)
op |= ATMEL_NFC_NFCWR;
}
/* Clear all flags. */
regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &val);
/* Send the command. */
regmap_write(nc->io, op, addr);
ret = atmel_nfc_wait(nc, poll, 0);
if (ret)
dev_err(nc->base.dev,
"Failed to send NAND command (err = %d)!",
ret);
/* Reset the op state. */
memset(&nc->op, 0, sizeof(nc->op));
return ret;
}
static void atmel_nand_data_in(struct atmel_nand *nand, void *buf,
unsigned int len, bool force_8bit)
{
struct atmel_nand_controller *nc;
nc = to_nand_controller(nand->base.controller);
/*
* If the controller supports DMA, the buffer address is DMA-able, and
* len is long enough to make a DMA transfer profitable, trigger a DMA
* transfer. If it fails, fall back to PIO mode.
*/
if (nc->dmac && virt_addr_valid(buf) &&
len >= MIN_DMA_LEN && !force_8bit &&
!atmel_nand_dma_transfer(nc, buf, nand->activecs->io.dma, len,
DMA_FROM_DEVICE))
return;
if ((nand->base.options & NAND_BUSWIDTH_16) && !force_8bit)
ioread16_rep(nand->activecs->io.virt, buf, len / 2);
else
ioread8_rep(nand->activecs->io.virt, buf, len);
}
static void atmel_nand_data_out(struct atmel_nand *nand, const void *buf,
unsigned int len, bool force_8bit)
{
struct atmel_nand_controller *nc;
nc = to_nand_controller(nand->base.controller);
/*
* If the controller supports DMA, the buffer address is DMA-able, and
* len is long enough to make a DMA transfer profitable, trigger a DMA
* transfer. If it fails, fall back to PIO mode.
*/
if (nc->dmac && virt_addr_valid(buf) &&
len >= MIN_DMA_LEN && !force_8bit &&
!atmel_nand_dma_transfer(nc, (void *)buf, nand->activecs->io.dma,
len, DMA_TO_DEVICE))
return;
if ((nand->base.options & NAND_BUSWIDTH_16) && !force_8bit)
iowrite16_rep(nand->activecs->io.virt, buf, len / 2);
else
iowrite8_rep(nand->activecs->io.virt, buf, len);
}
static int atmel_nand_waitrdy(struct atmel_nand *nand, unsigned int timeout_ms)
{
if (nand->activecs->rb.type == ATMEL_NAND_NO_RB)
return nand_soft_waitrdy(&nand->base, timeout_ms);
return nand_gpio_waitrdy(&nand->base, nand->activecs->rb.gpio,
timeout_ms);
}
static int atmel_hsmc_nand_waitrdy(struct atmel_nand *nand,
unsigned int timeout_ms)
{
struct atmel_hsmc_nand_controller *nc;
u32 status, mask;
if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
return atmel_nand_waitrdy(nand, timeout_ms);
nc = to_hsmc_nand_controller(nand->base.controller);
mask = ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
return regmap_read_poll_timeout_atomic(nc->base.smc, ATMEL_HSMC_NFC_SR,
status, status & mask,
10, timeout_ms * 1000);
}
static void atmel_nand_select_target(struct atmel_nand *nand,
unsigned int cs)
{
nand->activecs = &nand->cs[cs];
}
static void atmel_hsmc_nand_select_target(struct atmel_nand *nand,
unsigned int cs)
{
struct mtd_info *mtd = nand_to_mtd(&nand->base);
struct atmel_hsmc_nand_controller *nc;
u32 cfg = ATMEL_HSMC_NFC_CFG_PAGESIZE(mtd->writesize) |
ATMEL_HSMC_NFC_CFG_SPARESIZE(mtd->oobsize) |
ATMEL_HSMC_NFC_CFG_RSPARE;
nand->activecs = &nand->cs[cs];
nc = to_hsmc_nand_controller(nand->base.controller);
if (nc->cfg == cfg)
return;
regmap_update_bits(nc->base.smc, ATMEL_HSMC_NFC_CFG,
ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK |
ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK |
ATMEL_HSMC_NFC_CFG_RSPARE |
ATMEL_HSMC_NFC_CFG_WSPARE,
cfg);
nc->cfg = cfg;
}
static int atmel_smc_nand_exec_instr(struct atmel_nand *nand,
const struct nand_op_instr *instr)
{
struct atmel_nand_controller *nc;
unsigned int i;
nc = to_nand_controller(nand->base.controller);
switch (instr->type) {
case NAND_OP_CMD_INSTR:
writeb(instr->ctx.cmd.opcode,
nand->activecs->io.virt + nc->caps->cle_offs);
return 0;
case NAND_OP_ADDR_INSTR:
for (i = 0; i < instr->ctx.addr.naddrs; i++)
writeb(instr->ctx.addr.addrs[i],
nand->activecs->io.virt + nc->caps->ale_offs);
return 0;
case NAND_OP_DATA_IN_INSTR:
atmel_nand_data_in(nand, instr->ctx.data.buf.in,
instr->ctx.data.len,
instr->ctx.data.force_8bit);
return 0;
case NAND_OP_DATA_OUT_INSTR:
atmel_nand_data_out(nand, instr->ctx.data.buf.out,
instr->ctx.data.len,
instr->ctx.data.force_8bit);
return 0;
case NAND_OP_WAITRDY_INSTR:
return atmel_nand_waitrdy(nand,
instr->ctx.waitrdy.timeout_ms);
default:
break;
}
return -EINVAL;
}
static int atmel_smc_nand_exec_op(struct atmel_nand *nand,
const struct nand_operation *op,
bool check_only)
{
unsigned int i;
int ret = 0;
if (check_only)
return 0;
atmel_nand_select_target(nand, op->cs);
gpiod_set_value(nand->activecs->csgpio, 0);
for (i = 0; i < op->ninstrs; i++) {
ret = atmel_smc_nand_exec_instr(nand, &op->instrs[i]);
if (ret)
break;
}
gpiod_set_value(nand->activecs->csgpio, 1);
return ret;
}
static int atmel_hsmc_exec_cmd_addr(struct nand_chip *chip,
const struct nand_subop *subop)
{
struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_hsmc_nand_controller *nc;
unsigned int i, j;
nc = to_hsmc_nand_controller(chip->controller);
nc->op.cs = nand->activecs->id;
for (i = 0; i < subop->ninstrs; i++) {
const struct nand_op_instr *instr = &subop->instrs[i];
if (instr->type == NAND_OP_CMD_INSTR) {
nc->op.cmds[nc->op.ncmds++] = instr->ctx.cmd.opcode;
continue;
}
for (j = nand_subop_get_addr_start_off(subop, i);
j < nand_subop_get_num_addr_cyc(subop, i); j++) {
nc->op.addrs[nc->op.naddrs] = instr->ctx.addr.addrs[j];
nc->op.naddrs++;
}
}
return atmel_nfc_exec_op(nc, true);
}
static int atmel_hsmc_exec_rw(struct nand_chip *chip,
const struct nand_subop *subop)
{
const struct nand_op_instr *instr = subop->instrs;
struct atmel_nand *nand = to_atmel_nand(chip);
if (instr->type == NAND_OP_DATA_IN_INSTR)
atmel_nand_data_in(nand, instr->ctx.data.buf.in,
instr->ctx.data.len,
instr->ctx.data.force_8bit);
else
atmel_nand_data_out(nand, instr->ctx.data.buf.out,
instr->ctx.data.len,
instr->ctx.data.force_8bit);
return 0;
}
static int atmel_hsmc_exec_waitrdy(struct nand_chip *chip,
const struct nand_subop *subop)
{
const struct nand_op_instr *instr = subop->instrs;
struct atmel_nand *nand = to_atmel_nand(chip);
return atmel_hsmc_nand_waitrdy(nand, instr->ctx.waitrdy.timeout_ms);
}
static const struct nand_op_parser atmel_hsmc_op_parser = NAND_OP_PARSER(
NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_cmd_addr,
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
NAND_OP_PARSER_PAT_CMD_ELEM(true)),
NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_rw,
NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_rw,
NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0)),
NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_waitrdy,
NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
);
static int atmel_hsmc_nand_exec_op(struct atmel_nand *nand,
const struct nand_operation *op,
bool check_only)
{
int ret;
if (check_only)
return nand_op_parser_exec_op(&nand->base,
&atmel_hsmc_op_parser, op, true);
atmel_hsmc_nand_select_target(nand, op->cs);
ret = nand_op_parser_exec_op(&nand->base, &atmel_hsmc_op_parser, op,
false);
return ret;
}
static void atmel_nfc_copy_to_sram(struct nand_chip *chip, const u8 *buf,
bool oob_required)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct atmel_hsmc_nand_controller *nc;
int ret = -EIO;
nc = to_hsmc_nand_controller(chip->controller);
if (nc->base.dmac)
ret = atmel_nand_dma_transfer(&nc->base, (void *)buf,
nc->sram.dma, mtd->writesize,
DMA_TO_DEVICE);
/* Falling back to CPU copy. */
if (ret)
memcpy_toio(nc->sram.virt, buf, mtd->writesize);
if (oob_required)
memcpy_toio(nc->sram.virt + mtd->writesize, chip->oob_poi,
mtd->oobsize);
}
static void atmel_nfc_copy_from_sram(struct nand_chip *chip, u8 *buf,
bool oob_required)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct atmel_hsmc_nand_controller *nc;
int ret = -EIO;
nc = to_hsmc_nand_controller(chip->controller);
if (nc->base.dmac)
ret = atmel_nand_dma_transfer(&nc->base, buf, nc->sram.dma,
mtd->writesize, DMA_FROM_DEVICE);
/* Falling back to CPU copy. */
if (ret)
memcpy_fromio(buf, nc->sram.virt, mtd->writesize);
if (oob_required)
memcpy_fromio(chip->oob_poi, nc->sram.virt + mtd->writesize,
mtd->oobsize);
}
static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct atmel_hsmc_nand_controller *nc;
nc = to_hsmc_nand_controller(chip->controller);
if (column >= 0) {
nc->op.addrs[nc->op.naddrs++] = column;
/*
* 2 address cycles for the column offset on large page NANDs.
*/
if (mtd->writesize > 512)
nc->op.addrs[nc->op.naddrs++] = column >> 8;
}
if (page >= 0) {
nc->op.addrs[nc->op.naddrs++] = page;
nc->op.addrs[nc->op.naddrs++] = page >> 8;
if (chip->options & NAND_ROW_ADDR_3)
nc->op.addrs[nc->op.naddrs++] = page >> 16;
}
}
static int atmel_nand_pmecc_enable(struct nand_chip *chip, int op, bool raw)
{
struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_nand_controller *nc;
int ret;
nc = to_nand_controller(chip->controller);
if (raw)
return 0;
ret = atmel_pmecc_enable(nand->pmecc, op);
if (ret)
dev_err(nc->dev,
"Failed to enable ECC engine (err = %d)\n", ret);
return ret;
}
static void atmel_nand_pmecc_disable(struct nand_chip *chip, bool raw)
{
struct atmel_nand *nand = to_atmel_nand(chip);
if (!raw)
atmel_pmecc_disable(nand->pmecc);
}
static int atmel_nand_pmecc_generate_eccbytes(struct nand_chip *chip, bool raw)
{
struct atmel_nand *nand = to_atmel_nand(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
struct atmel_nand_controller *nc;
struct mtd_oob_region oobregion;
void *eccbuf;
int ret, i;
nc = to_nand_controller(chip->controller);
if (raw)
return 0;
ret = atmel_pmecc_wait_rdy(nand->pmecc);
if (ret) {
dev_err(nc->dev,
"Failed to transfer NAND page data (err = %d)\n",
ret);
return ret;
}
mtd_ooblayout_ecc(mtd, 0, &oobregion);
eccbuf = chip->oob_poi + oobregion.offset;
for (i = 0; i < chip->ecc.steps; i++) {
atmel_pmecc_get_generated_eccbytes(nand->pmecc, i,
eccbuf);
eccbuf += chip->ecc.bytes;
}
return 0;
}
static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
bool raw)
{
struct atmel_nand *nand = to_atmel_nand(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
struct atmel_nand_controller *nc;
struct mtd_oob_region oobregion;
int ret, i, max_bitflips = 0;
void *databuf, *eccbuf;
nc = to_nand_controller(chip->controller);
if (raw)
return 0;
ret = atmel_pmecc_wait_rdy(nand->pmecc);
if (ret) {
dev_err(nc->dev,
"Failed to read NAND page data (err = %d)\n",
ret);
return ret;
}
mtd_ooblayout_ecc(mtd, 0, &oobregion);
eccbuf = chip->oob_poi + oobregion.offset;
databuf = buf;
for (i = 0; i < chip->ecc.steps; i++) {
ret = atmel_pmecc_correct_sector(nand->pmecc, i, databuf,
eccbuf);
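/*
 * An uncorrectable sector may simply be an erased page containing a few
 * bitflips: when the PMECC block cannot detect erased chunks itself,
 * double-check against an all-0xff pattern before declaring a real ECC
 * failure.
 */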
if (ret < 0 && !atmel_pmecc_correct_erased_chunks(nand->pmecc))
ret = nand_check_erased_ecc_chunk(databuf,
chip->ecc.size,
eccbuf,
chip->ecc.bytes,
NULL, 0,
chip->ecc.strength);
if (ret >= 0) {
mtd->ecc_stats.corrected += ret;
max_bitflips = max(ret, max_bitflips);
} else {
mtd->ecc_stats.failed++;
}
databuf += chip->ecc.size;
eccbuf += chip->ecc.bytes;
}
return max_bitflips;
}
static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
bool oob_required, int page, bool raw)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct atmel_nand *nand = to_atmel_nand(chip);
int ret;
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
if (ret)
return ret;
nand_write_data_op(chip, buf, mtd->writesize, false);
ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
if (ret) {
atmel_pmecc_disable(nand->pmecc);
return ret;
}
atmel_nand_pmecc_disable(chip, raw);
nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
return nand_prog_page_end_op(chip);
}
static int atmel_nand_pmecc_write_page(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, false);
}
static int atmel_nand_pmecc_write_page_raw(struct nand_chip *chip,
const u8 *buf, int oob_required,
int page)
{
return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, true);
}
static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
bool oob_required, int page, bool raw)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
nand_read_page_op(chip, page, 0, NULL, 0);
ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
if (ret)
return ret;
ret = nand_read_data_op(chip, buf, mtd->writesize, false, false);
if (ret)
goto out_disable;
ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false, false);
if (ret)
goto out_disable;
ret = atmel_nand_pmecc_correct_data(chip, buf, raw);
out_disable:
atmel_nand_pmecc_disable(chip, raw);
return ret;
}
static int atmel_nand_pmecc_read_page(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, false);
}
static int atmel_nand_pmecc_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, true);
}
static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip,
const u8 *buf, bool oob_required,
int page, bool raw)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_hsmc_nand_controller *nc;
int ret;
atmel_hsmc_nand_select_target(nand, chip->cur_cs);
nc = to_hsmc_nand_controller(chip->controller);
atmel_nfc_copy_to_sram(chip, buf, false);
nc->op.cmds[0] = NAND_CMD_SEQIN;
nc->op.ncmds = 1;
atmel_nfc_set_op_addr(chip, page, 0x0);
nc->op.cs = nand->activecs->id;
nc->op.data = ATMEL_NFC_WRITE_DATA;
ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
if (ret)
return ret;
ret = atmel_nfc_exec_op(nc, false);
if (ret) {
atmel_nand_pmecc_disable(chip, raw);
dev_err(nc->base.dev,
"Failed to transfer NAND page data (err = %d)\n",
ret);
return ret;
}
ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
atmel_nand_pmecc_disable(chip, raw);
if (ret)
return ret;
nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
return nand_prog_page_end_op(chip);
}
static int atmel_hsmc_nand_pmecc_write_page(struct nand_chip *chip,
const u8 *buf, int oob_required,
int page)
{
return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
false);
}
static int atmel_hsmc_nand_pmecc_write_page_raw(struct nand_chip *chip,
const u8 *buf,
int oob_required, int page)
{
return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
true);
}
static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
bool oob_required, int page,
bool raw)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_hsmc_nand_controller *nc;
int ret;
atmel_hsmc_nand_select_target(nand, chip->cur_cs);
nc = to_hsmc_nand_controller(chip->controller);
/*
* Optimized read page accessors only work when the NAND R/B pin is
* connected to a native SoC R/B pin. If that's not the case, fall back
* to the non-optimized one.
*/
if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
raw);
nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READ0;
if (mtd->writesize > 512)
nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READSTART;
atmel_nfc_set_op_addr(chip, page, 0x0);
nc->op.cs = nand->activecs->id;
nc->op.data = ATMEL_NFC_READ_DATA;
ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
if (ret)
return ret;
ret = atmel_nfc_exec_op(nc, false);
if (ret) {
atmel_nand_pmecc_disable(chip, raw);
dev_err(nc->base.dev,
"Failed to load NAND page data (err = %d)\n",
ret);
return ret;
}
atmel_nfc_copy_from_sram(chip, buf, true);
ret = atmel_nand_pmecc_correct_data(chip, buf, raw);
atmel_nand_pmecc_disable(chip, raw);
return ret;
}
static int atmel_hsmc_nand_pmecc_read_page(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
false);
}
static int atmel_hsmc_nand_pmecc_read_page_raw(struct nand_chip *chip,
u8 *buf, int oob_required,
int page)
{
return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
true);
}
static int atmel_nand_pmecc_init(struct nand_chip *chip)
{
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(&chip->base);
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_device *nanddev = mtd_to_nanddev(mtd);
struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_nand_controller *nc;
struct atmel_pmecc_user_req req;
nc = to_nand_controller(chip->controller);
if (!nc->pmecc) {
dev_err(nc->dev, "HW ECC not supported\n");
return -ENOTSUPP;
}
if (nc->caps->legacy_of_bindings) {
u32 val;
if (!of_property_read_u32(nc->dev->of_node, "atmel,pmecc-cap",
&val))
chip->ecc.strength = val;
if (!of_property_read_u32(nc->dev->of_node,
"atmel,pmecc-sector-size",
&val))
chip->ecc.size = val;
}
if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
else if (chip->ecc.strength)
req.ecc.strength = chip->ecc.strength;
else if (requirements->strength)
req.ecc.strength = requirements->strength;
else
req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
if (chip->ecc.size)
req.ecc.sectorsize = chip->ecc.size;
else if (requirements->step_size)
req.ecc.sectorsize = requirements->step_size;
else
req.ecc.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO;
req.pagesize = mtd->writesize;
req.oobsize = mtd->oobsize;
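/*
 * Small-page devices get a fixed 4 ECC bytes starting at OOB offset 0,
 * while large-page devices hand everything but the first 2 OOB bytes
 * (typically reserved for the bad block marker) to the PMECC, which then
 * picks the actual per-sector byte count and OOB offset itself.
 */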
if (mtd->writesize <= 512) {
req.ecc.bytes = 4;
req.ecc.ooboffset = 0;
} else {
req.ecc.bytes = mtd->oobsize - 2;
req.ecc.ooboffset = ATMEL_PMECC_OOBOFFSET_AUTO;
}
nand->pmecc = atmel_pmecc_create_user(nc->pmecc, &req);
if (IS_ERR(nand->pmecc))
return PTR_ERR(nand->pmecc);
chip->ecc.algo = NAND_ECC_ALGO_BCH;
chip->ecc.size = req.ecc.sectorsize;
chip->ecc.bytes = req.ecc.bytes / req.ecc.nsectors;
chip->ecc.strength = req.ecc.strength;
chip->options |= NAND_NO_SUBPAGE_WRITE;
mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
return 0;
}
static int atmel_nand_ecc_init(struct nand_chip *chip)
{
struct atmel_nand_controller *nc;
int ret;
nc = to_nand_controller(chip->controller);
switch (chip->ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_NONE:
case NAND_ECC_ENGINE_TYPE_SOFT:
/*
* Nothing to do, the core will initialize everything for us.
*/
break;
case NAND_ECC_ENGINE_TYPE_ON_HOST:
ret = atmel_nand_pmecc_init(chip);
if (ret)
return ret;
chip->ecc.read_page = atmel_nand_pmecc_read_page;
chip->ecc.write_page = atmel_nand_pmecc_write_page;
chip->ecc.read_page_raw = atmel_nand_pmecc_read_page_raw;
chip->ecc.write_page_raw = atmel_nand_pmecc_write_page_raw;
break;
default:
/* Other modes are not supported. */
dev_err(nc->dev, "Unsupported ECC mode: %d\n",
chip->ecc.engine_type);
return -ENOTSUPP;
}
return 0;
}
static int atmel_hsmc_nand_ecc_init(struct nand_chip *chip)
{
int ret;
ret = atmel_nand_ecc_init(chip);
if (ret)
return ret;
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return 0;
/* Adjust the ECC operations for the HSMC IP. */
chip->ecc.read_page = atmel_hsmc_nand_pmecc_read_page;
chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page;
chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw;
chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw;
return 0;
}
static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
const struct nand_interface_config *conf,
struct atmel_smc_cs_conf *smcconf)
{
u32 ncycles, totalcycles, timeps, mckperiodps;
struct atmel_nand_controller *nc;
int ret;
nc = to_nand_controller(nand->base.controller);
/* DDR interface not supported. */
if (!nand_interface_is_sdr(conf))
return -ENOTSUPP;
/*
* tRC < 30ns implies EDO mode. This controller does not support this
* mode.
*/
if (conf->timings.sdr.tRC_min < 30000)
return -ENOTSUPP;
atmel_smc_cs_conf_init(smcconf);
mckperiodps = NSEC_PER_SEC / clk_get_rate(nc->mck);
mckperiodps *= 1000;
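/*
 * mckperiodps now holds the master clock period in picoseconds.
 * Worked example with purely illustrative numbers: a 133 MHz MCK gives
 * 1000000000 / 133000000 = 7 ns, i.e. 7000 ps per cycle, so a 15 ns tWP
 * below translates to DIV_ROUND_UP(15000, 7000) = 3 NWE_PULSE cycles.
 */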
/*
* Set write pulse timing. This one is easy to extract:
*
* NWE_PULSE = tWP
*/
ncycles = DIV_ROUND_UP(conf->timings.sdr.tWP_min, mckperiodps);
totalcycles = ncycles;
ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NWE_SHIFT,
ncycles);
if (ret)
return ret;
/*
* The write setup timing depends on the operation done on the NAND.
* All operations go through the same data bus, but the operation
* type depends on the address we are writing to (ALE/CLE address
* lines).
* Since we have no way to differentiate the different operations at
* the SMC level, we must consider the worst case (the biggest setup
* time among all operation types):
*
* NWE_SETUP = max(tCLS, tCS, tALS, tDS) - NWE_PULSE
*/
timeps = max3(conf->timings.sdr.tCLS_min, conf->timings.sdr.tCS_min,
conf->timings.sdr.tALS_min);
timeps = max(timeps, conf->timings.sdr.tDS_min);
ncycles = DIV_ROUND_UP(timeps, mckperiodps);
ncycles = ncycles > totalcycles ? ncycles - totalcycles : 0;
totalcycles += ncycles;
ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NWE_SHIFT,
ncycles);
if (ret)
return ret;
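/*
 * Continuing the illustrative example above (7000 ps MCK period, 3
 * NWE_PULSE cycles): if tCS_min = 25 ns is the largest of the four
 * timings, DIV_ROUND_UP(25000, 7000) = 4 cycles, minus the 3 cycles
 * already spent in NWE_PULSE, leaves NWE_SETUP = 1 cycle.
 */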
/*
* As for the write setup timing, the write hold timing depends on the
* operation done on the NAND:
*
* NWE_HOLD = max(tCLH, tCH, tALH, tDH, tWH)
*/
timeps = max3(conf->timings.sdr.tCLH_min, conf->timings.sdr.tCH_min,
conf->timings.sdr.tALH_min);
timeps = max3(timeps, conf->timings.sdr.tDH_min,
conf->timings.sdr.tWH_min);
ncycles = DIV_ROUND_UP(timeps, mckperiodps);
totalcycles += ncycles;
/*
* The write cycle timing directly matches tWC, but also depends on the
* setup and hold timings we calculated earlier, which gives:
*
* NWE_CYCLE = max(tWC, NWE_SETUP + NWE_PULSE + NWE_HOLD)
*/
ncycles = DIV_ROUND_UP(conf->timings.sdr.tWC_min, mckperiodps);
ncycles = max(totalcycles, ncycles);
ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NWE_SHIFT,
ncycles);
if (ret)
return ret;
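/*
 * Continuing the illustrative example: a 10 ns worst-case hold adds
 * DIV_ROUND_UP(10000, 7000) = 2 cycles, so totalcycles = 3 + 1 + 2 = 6.
 * With tWC_min = 35 ns, DIV_ROUND_UP(35000, 7000) = 5 cycles and
 * NWE_CYCLE = max(6, 5) = 6 cycles.
 */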
/*
* We don't want the CS line to be toggled between each byte/word
* transfer to the NAND. The only way to guarantee that is to have the
* NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
*
* NCS_WR_PULSE = NWE_CYCLE
*/
ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_WR_SHIFT,
ncycles);
if (ret)
return ret;
/*
* As for the write setup timing, the read hold timing depends on the
* operation done on the NAND:
*
* NRD_HOLD = max(tREH, tRHOH)
*/
timeps = max(conf->timings.sdr.tREH_min, conf->timings.sdr.tRHOH_min);
ncycles = DIV_ROUND_UP(timeps, mckperiodps);
totalcycles = ncycles;
/*
* TDF = tRHZ - NRD_HOLD
*/
ncycles = DIV_ROUND_UP(conf->timings.sdr.tRHZ_max, mckperiodps);
ncycles -= totalcycles;
/*
* In ONFI 4.0 specs, tRHZ has been increased to support EDO NANDs and
* we might end up with a config that does not fit in the TDF field.
* Just take the max value in this case and hope that the NAND is more
* tolerant than advertised.
*/
if (ncycles > ATMEL_SMC_MODE_TDF_MAX)
ncycles = ATMEL_SMC_MODE_TDF_MAX;
else if (ncycles < ATMEL_SMC_MODE_TDF_MIN)
ncycles = ATMEL_SMC_MODE_TDF_MIN;
smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles) |
ATMEL_SMC_MODE_TDFMODE_OPTIMIZED;
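/*
 * Illustrative example: tRHZ_max = 100 ns at a 7000 ps MCK period gives
 * DIV_ROUND_UP(100000, 7000) = 15 cycles; subtracting an assumed 3-cycle
 * NRD_HOLD yields TDF = 12 cycles, which is then clamped to the
 * [TDF_MIN, TDF_MAX] range supported by the SMC.
 */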
/*
* Read pulse timing directly matches tRP:
*
* NRD_PULSE = tRP
*/
ncycles = DIV_ROUND_UP(conf->timings.sdr.tRP_min, mckperiodps);
totalcycles += ncycles;
ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NRD_SHIFT,
ncycles);
if (ret)
return ret;
/*
* The read cycle timing directly matches tRC, but also depends on the
* setup and hold timings we calculated earlier, which gives:
*
* NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
*
* NRD_SETUP is always 0.
*/
ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
ncycles = max(totalcycles, ncycles);
ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NRD_SHIFT,
ncycles);
if (ret)
return ret;
/*
* We don't want the CS line to be toggled between each byte/word
* transfer from the NAND. The only way to guarantee that is to have
* the NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
*
* NCS_RD_PULSE = NRD_CYCLE
*/
ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_RD_SHIFT,
ncycles);
if (ret)
return ret;
/* The Txxx timings directly match the corresponding tXXX ones. */
ncycles = DIV_ROUND_UP(conf->timings.sdr.tCLR_min, mckperiodps);
ret = atmel_smc_cs_conf_set_timing(smcconf,
ATMEL_HSMC_TIMINGS_TCLR_SHIFT,
ncycles);
if (ret)
return ret;
ncycles = DIV_ROUND_UP(conf->timings.sdr.tADL_min, mckperiodps);
ret = atmel_smc_cs_conf_set_timing(smcconf,
ATMEL_HSMC_TIMINGS_TADL_SHIFT,
ncycles);
/*
* Version 4 of the ONFI spec mandates that tADL be at least 400
* nanoseconds, but, depending on the master clock rate, 400 ns may not
* fit in the tADL field of the SMC reg. We need to relax the check and
* accept the -ERANGE return code.
*
* Note that previous versions of the ONFI spec had a lower tADL_min
* (100 or 200 ns). It's not clear why this timing constraint got
* increased but it seems most NANDs are fine with values lower than
* 400ns, so we should be safe.
*/
if (ret && ret != -ERANGE)
return ret;
ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps);
ret = atmel_smc_cs_conf_set_timing(smcconf,
ATMEL_HSMC_TIMINGS_TAR_SHIFT,
ncycles);
if (ret)
return ret;
ncycles = DIV_ROUND_UP(conf->timings.sdr.tRR_min, mckperiodps);
ret = atmel_smc_cs_conf_set_timing(smcconf,
ATMEL_HSMC_TIMINGS_TRR_SHIFT,
ncycles);
if (ret)
return ret;
ncycles = DIV_ROUND_UP(conf->timings.sdr.tWB_max, mckperiodps);
ret = atmel_smc_cs_conf_set_timing(smcconf,
ATMEL_HSMC_TIMINGS_TWB_SHIFT,
ncycles);
if (ret)
return ret;
/* Attach the CS line to the NFC logic. */
smcconf->timings |= ATMEL_HSMC_TIMINGS_NFSEL;
/* Set the appropriate data bus width. */
if (nand->base.options & NAND_BUSWIDTH_16)
smcconf->mode |= ATMEL_SMC_MODE_DBW_16;
/* Operate in NRD/NWE READ/WRITEMODE. */
smcconf->mode |= ATMEL_SMC_MODE_READMODE_NRD |
ATMEL_SMC_MODE_WRITEMODE_NWE;
return 0;
}
static int atmel_smc_nand_setup_interface(struct atmel_nand *nand,
int csline,
const struct nand_interface_config *conf)
{
struct atmel_nand_controller *nc;
struct atmel_smc_cs_conf smcconf;
struct atmel_nand_cs *cs;
int ret;
nc = to_nand_controller(nand->base.controller);
ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
if (ret)
return ret;
if (csline == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
cs = &nand->cs[csline];
cs->smcconf = smcconf;
atmel_smc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf);
return 0;
}
static int atmel_hsmc_nand_setup_interface(struct atmel_nand *nand,
int csline,
const struct nand_interface_config *conf)
{
struct atmel_hsmc_nand_controller *nc;
struct atmel_smc_cs_conf smcconf;
struct atmel_nand_cs *cs;
int ret;
nc = to_hsmc_nand_controller(nand->base.controller);
ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
if (ret)
return ret;
if (csline == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
cs = &nand->cs[csline];
cs->smcconf = smcconf;
if (cs->rb.type == ATMEL_NAND_NATIVE_RB)
cs->smcconf.timings |= ATMEL_HSMC_TIMINGS_RBNSEL(cs->rb.id);
atmel_hsmc_cs_conf_apply(nc->base.smc, nc->hsmc_layout, cs->id,
&cs->smcconf);
return 0;
}
static int atmel_nand_setup_interface(struct nand_chip *chip, int csline,
const struct nand_interface_config *conf)
{
struct atmel_nand *nand = to_atmel_nand(chip);
const struct nand_sdr_timings *sdr;
struct atmel_nand_controller *nc;
sdr = nand_get_sdr_timings(conf);
if (IS_ERR(sdr))
return PTR_ERR(sdr);
nc = to_nand_controller(nand->base.controller);
if (csline >= nand->numcs ||
(csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY))
return -EINVAL;
return nc->caps->ops->setup_interface(nand, csline, conf);
}
static int atmel_nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_nand_controller *nc;
nc = to_nand_controller(nand->base.controller);
return nc->caps->ops->exec_op(nand, op, check_only);
}
static void atmel_nand_init(struct atmel_nand_controller *nc,
struct atmel_nand *nand)
{
struct nand_chip *chip = &nand->base;
struct mtd_info *mtd = nand_to_mtd(chip);
mtd->dev.parent = nc->dev;
nand->base.controller = &nc->base;
if (!nc->mck || !nc->caps->ops->setup_interface)
chip->options |= NAND_KEEP_TIMINGS;
/*
* Use a bounce buffer when the buffer passed by the MTD user is not
* suitable for DMA.
*/
if (nc->dmac)
chip->options |= NAND_USES_DMA;
/* Default to HW ECC if pmecc is available. */
if (nc->pmecc)
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
}
static void atmel_smc_nand_init(struct atmel_nand_controller *nc,
struct atmel_nand *nand)
{
struct nand_chip *chip = &nand->base;
struct atmel_smc_nand_controller *smc_nc;
int i;
atmel_nand_init(nc, nand);
smc_nc = to_smc_nand_controller(chip->controller);
if (!smc_nc->ebi_csa_regmap)
return;
/* Attach the CS to the NAND Flash logic. */
for (i = 0; i < nand->numcs; i++)
regmap_update_bits(smc_nc->ebi_csa_regmap,
smc_nc->ebi_csa->offs,
BIT(nand->cs[i].id), BIT(nand->cs[i].id));
if (smc_nc->ebi_csa->nfd0_on_d16)
regmap_update_bits(smc_nc->ebi_csa_regmap,
smc_nc->ebi_csa->offs,
smc_nc->ebi_csa->nfd0_on_d16,
smc_nc->ebi_csa->nfd0_on_d16);
}
static int atmel_nand_controller_remove_nand(struct atmel_nand *nand)
{
struct nand_chip *chip = &nand->base;
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
ret = mtd_device_unregister(mtd);
if (ret)
return ret;
nand_cleanup(chip);
list_del(&nand->node);
return 0;
}
static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
struct device_node *np,
int reg_cells)
{
struct atmel_nand *nand;
struct gpio_desc *gpio;
int numcs, ret, i;
numcs = of_property_count_elems_of_size(np, "reg",
reg_cells * sizeof(u32));
if (numcs < 1) {
dev_err(nc->dev, "Missing or invalid reg property\n");
return ERR_PTR(-EINVAL);
}
nand = devm_kzalloc(nc->dev, struct_size(nand, cs, numcs), GFP_KERNEL);
if (!nand)
return ERR_PTR(-ENOMEM);
nand->numcs = numcs;
gpio = devm_fwnode_gpiod_get(nc->dev, of_fwnode_handle(np),
"det", GPIOD_IN, "nand-det");
if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
dev_err(nc->dev,
"Failed to get detect gpio (err = %ld)\n",
PTR_ERR(gpio));
return ERR_CAST(gpio);
}
if (!IS_ERR(gpio))
nand->cdgpio = gpio;
for (i = 0; i < numcs; i++) {
struct resource res;
u32 val;
ret = of_address_to_resource(np, 0, &res);
if (ret) {
dev_err(nc->dev, "Invalid reg property (err = %d)\n",
ret);
return ERR_PTR(ret);
}
ret = of_property_read_u32_index(np, "reg", i * reg_cells,
&val);
if (ret) {
dev_err(nc->dev, "Invalid reg property (err = %d)\n",
ret);
return ERR_PTR(ret);
}
nand->cs[i].id = val;
nand->cs[i].io.dma = res.start;
nand->cs[i].io.virt = devm_ioremap_resource(nc->dev, &res);
if (IS_ERR(nand->cs[i].io.virt))
return ERR_CAST(nand->cs[i].io.virt);
if (!of_property_read_u32(np, "atmel,rb", &val)) {
if (val > ATMEL_NFC_MAX_RB_ID)
return ERR_PTR(-EINVAL);
nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
nand->cs[i].rb.id = val;
} else {
gpio = devm_fwnode_gpiod_get_index(nc->dev,
of_fwnode_handle(np),
"rb", i, GPIOD_IN,
"nand-rb");
if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
dev_err(nc->dev,
"Failed to get R/B gpio (err = %ld)\n",
PTR_ERR(gpio));
return ERR_CAST(gpio);
}
if (!IS_ERR(gpio)) {
nand->cs[i].rb.type = ATMEL_NAND_GPIO_RB;
nand->cs[i].rb.gpio = gpio;
}
}
gpio = devm_fwnode_gpiod_get_index(nc->dev,
of_fwnode_handle(np),
"cs", i, GPIOD_OUT_HIGH,
"nand-cs");
if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
dev_err(nc->dev,
"Failed to get CS gpio (err = %ld)\n",
PTR_ERR(gpio));
return ERR_CAST(gpio);
}
if (!IS_ERR(gpio))
nand->cs[i].csgpio = gpio;
}
nand_set_flash_node(&nand->base, np);
return nand;
}
static int
atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
struct atmel_nand *nand)
{
struct nand_chip *chip = &nand->base;
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
/* No card inserted, skip this NAND. */
if (nand->cdgpio && gpiod_get_value(nand->cdgpio)) {
dev_info(nc->dev, "No SmartMedia card inserted.\n");
return 0;
}
nc->caps->ops->nand_init(nc, nand);
ret = nand_scan(chip, nand->numcs);
if (ret) {
dev_err(nc->dev, "NAND scan failed: %d\n", ret);
return ret;
}
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(nc->dev, "Failed to register mtd device: %d\n", ret);
nand_cleanup(chip);
return ret;
}
list_add_tail(&nand->node, &nc->chips);
return 0;
}
static int
atmel_nand_controller_remove_nands(struct atmel_nand_controller *nc)
{
struct atmel_nand *nand, *tmp;
int ret;
list_for_each_entry_safe(nand, tmp, &nc->chips, node) {
ret = atmel_nand_controller_remove_nand(nand);
if (ret)
return ret;
}
return 0;
}
static int
atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
{
struct device *dev = nc->dev;
struct platform_device *pdev = to_platform_device(dev);
struct atmel_nand *nand;
struct gpio_desc *gpio;
struct resource *res;
/*
* Legacy bindings only allow connecting a single NAND with a unique CS
* line to the controller.
*/
nand = devm_kzalloc(nc->dev, sizeof(*nand) + sizeof(*nand->cs),
GFP_KERNEL);
if (!nand)
return -ENOMEM;
nand->numcs = 1;
nand->cs[0].io.virt = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(nand->cs[0].io.virt))
return PTR_ERR(nand->cs[0].io.virt);
nand->cs[0].io.dma = res->start;
/*
* The old driver hardcoded the CS id to 3 for all sama5 controllers.
* Since this id is only meaningful for the sama5 controller, we can
* safely set it to 3 regardless of the controller.
* Anyone who wants to connect a NAND to a different CS line has to use
* the new bindings.
*/
nand->cs[0].id = 3;
/* R/B GPIO. */
gpio = devm_gpiod_get_index_optional(dev, NULL, 0, GPIOD_IN);
if (IS_ERR(gpio)) {
dev_err(dev, "Failed to get R/B gpio (err = %ld)\n",
PTR_ERR(gpio));
return PTR_ERR(gpio);
}
if (gpio) {
nand->cs[0].rb.type = ATMEL_NAND_GPIO_RB;
nand->cs[0].rb.gpio = gpio;
}
/* CS GPIO. */
gpio = devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_HIGH);
if (IS_ERR(gpio)) {
dev_err(dev, "Failed to get CS gpio (err = %ld)\n",
PTR_ERR(gpio));
return PTR_ERR(gpio);
}
nand->cs[0].csgpio = gpio;
/* Card detect GPIO. */
gpio = devm_gpiod_get_index_optional(nc->dev, NULL, 2, GPIOD_IN);
if (IS_ERR(gpio)) {
dev_err(dev,
"Failed to get detect gpio (err = %ld)\n",
PTR_ERR(gpio));
return PTR_ERR(gpio);
}
nand->cdgpio = gpio;
nand_set_flash_node(&nand->base, nc->dev->of_node);
return atmel_nand_controller_add_nand(nc, nand);
}
static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
{
struct device_node *np, *nand_np;
struct device *dev = nc->dev;
int ret, reg_cells;
u32 val;
/* We do not retrieve the SMC syscon when parsing old DTs. */
if (nc->caps->legacy_of_bindings)
return atmel_nand_controller_legacy_add_nands(nc);
np = dev->of_node;
ret = of_property_read_u32(np, "#address-cells", &val);
if (ret) {
dev_err(dev, "missing #address-cells property\n");
return ret;
}
reg_cells = val;
ret = of_property_read_u32(np, "#size-cells", &val);
if (ret) {
dev_err(dev, "missing #size-cells property\n");
return ret;
}
reg_cells += val;
for_each_child_of_node(np, nand_np) {
struct atmel_nand *nand;
nand = atmel_nand_create(nc, nand_np, reg_cells);
if (IS_ERR(nand)) {
ret = PTR_ERR(nand);
goto err;
}
ret = atmel_nand_controller_add_nand(nc, nand);
if (ret)
goto err;
}
return 0;
err:
atmel_nand_controller_remove_nands(nc);
return ret;
}
static void atmel_nand_controller_cleanup(struct atmel_nand_controller *nc)
{
if (nc->dmac)
dma_release_channel(nc->dmac);
clk_put(nc->mck);
}
static const struct atmel_smc_nand_ebi_csa_cfg at91sam9260_ebi_csa = {
.offs = AT91SAM9260_MATRIX_EBICSA,
};
static const struct atmel_smc_nand_ebi_csa_cfg at91sam9261_ebi_csa = {
.offs = AT91SAM9261_MATRIX_EBICSA,
};
static const struct atmel_smc_nand_ebi_csa_cfg at91sam9263_ebi_csa = {
.offs = AT91SAM9263_MATRIX_EBI0CSA,
};
static const struct atmel_smc_nand_ebi_csa_cfg at91sam9rl_ebi_csa = {
.offs = AT91SAM9RL_MATRIX_EBICSA,
};
static const struct atmel_smc_nand_ebi_csa_cfg at91sam9g45_ebi_csa = {
.offs = AT91SAM9G45_MATRIX_EBICSA,
};
static const struct atmel_smc_nand_ebi_csa_cfg at91sam9n12_ebi_csa = {
.offs = AT91SAM9N12_MATRIX_EBICSA,
};
static const struct atmel_smc_nand_ebi_csa_cfg at91sam9x5_ebi_csa = {
.offs = AT91SAM9X5_MATRIX_EBICSA,
};
static const struct atmel_smc_nand_ebi_csa_cfg sam9x60_ebi_csa = {
.offs = AT91_SFR_CCFG_EBICSA,
.nfd0_on_d16 = AT91_SFR_CCFG_NFD0_ON_D16,
};
static const struct of_device_id __maybe_unused atmel_ebi_csa_regmap_of_ids[] = {
{
.compatible = "atmel,at91sam9260-matrix",
.data = &at91sam9260_ebi_csa,
},
{
.compatible = "atmel,at91sam9261-matrix",
.data = &at91sam9261_ebi_csa,
},
{
.compatible = "atmel,at91sam9263-matrix",
.data = &at91sam9263_ebi_csa,
},
{
.compatible = "atmel,at91sam9rl-matrix",
.data = &at91sam9rl_ebi_csa,
},
{
.compatible = "atmel,at91sam9g45-matrix",
.data = &at91sam9g45_ebi_csa,
},
{
.compatible = "atmel,at91sam9n12-matrix",
.data = &at91sam9n12_ebi_csa,
},
{
.compatible = "atmel,at91sam9x5-matrix",
.data = &at91sam9x5_ebi_csa,
},
{
.compatible = "microchip,sam9x60-sfr",
.data = &sam9x60_ebi_csa,
},
{ /* sentinel */ },
};
static int atmel_nand_attach_chip(struct nand_chip *chip)
{
struct atmel_nand_controller *nc = to_nand_controller(chip->controller);
struct atmel_nand *nand = to_atmel_nand(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
ret = nc->caps->ops->ecc_init(chip);
if (ret)
return ret;
if (nc->caps->legacy_of_bindings || !nc->dev->of_node) {
/*
* We keep the MTD name unchanged to avoid breaking platforms
* where the MTD cmdline parser is used and the bootloader
* has not been updated to use the new naming scheme.
*/
mtd->name = "atmel_nand";
} else if (!mtd->name) {
/*
* If the new bindings are used and the bootloader has not been
* updated to pass a new mtdparts parameter on the cmdline, you
* should define the following property in your nand node:
*
* label = "atmel_nand";
*
* This way, mtd->name will be set by the core when
* nand_set_flash_node() is called.
*/
mtd->name = devm_kasprintf(nc->dev, GFP_KERNEL,
"%s:nand.%d", dev_name(nc->dev),
nand->cs[0].id);
if (!mtd->name) {
dev_err(nc->dev, "Failed to allocate mtd->name\n");
return -ENOMEM;
}
}
return 0;
}
static const struct nand_controller_ops atmel_nand_controller_ops = {
.attach_chip = atmel_nand_attach_chip,
.setup_interface = atmel_nand_setup_interface,
.exec_op = atmel_nand_exec_op,
};
static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
struct platform_device *pdev,
const struct atmel_nand_controller_caps *caps)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
int ret;
nand_controller_init(&nc->base);
nc->base.ops = &atmel_nand_controller_ops;
INIT_LIST_HEAD(&nc->chips);
nc->dev = dev;
nc->caps = caps;
platform_set_drvdata(pdev, nc);
nc->pmecc = devm_atmel_pmecc_get(dev);
if (IS_ERR(nc->pmecc))
return dev_err_probe(dev, PTR_ERR(nc->pmecc),
"Could not get PMECC object\n");
if (nc->caps->has_dma && !atmel_nand_avoid_dma) {
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
nc->dmac = dma_request_channel(mask, NULL, NULL);
if (!nc->dmac)
dev_err(nc->dev, "Failed to request DMA channel\n");
}
/* We do not retrieve the SMC syscon when parsing old DTs. */
if (nc->caps->legacy_of_bindings)
return 0;
nc->mck = of_clk_get(dev->parent->of_node, 0);
if (IS_ERR(nc->mck)) {
dev_err(dev, "Failed to retrieve MCK clk\n");
ret = PTR_ERR(nc->mck);
goto out_release_dma;
}
np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
if (!np) {
dev_err(dev, "Missing or invalid atmel,smc property\n");
ret = -EINVAL;
goto out_release_dma;
}
nc->smc = syscon_node_to_regmap(np);
of_node_put(np);
if (IS_ERR(nc->smc)) {
ret = PTR_ERR(nc->smc);
dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret);
goto out_release_dma;
}
return 0;
out_release_dma:
if (nc->dmac)
dma_release_channel(nc->dmac);
return ret;
}
static int
atmel_smc_nand_controller_init(struct atmel_smc_nand_controller *nc)
{
struct device *dev = nc->base.dev;
const struct of_device_id *match;
struct device_node *np;
int ret;
/* We do not retrieve the EBICSA regmap when parsing old DTs. */
if (nc->base.caps->legacy_of_bindings)
return 0;
np = of_parse_phandle(dev->parent->of_node,
nc->base.caps->ebi_csa_regmap_name, 0);
if (!np)
return 0;
match = of_match_node(atmel_ebi_csa_regmap_of_ids, np);
if (!match) {
of_node_put(np);
return 0;
}
nc->ebi_csa_regmap = syscon_node_to_regmap(np);
of_node_put(np);
if (IS_ERR(nc->ebi_csa_regmap)) {
ret = PTR_ERR(nc->ebi_csa_regmap);
dev_err(dev, "Could not get EBICSA regmap (err = %d)\n", ret);
return ret;
}
nc->ebi_csa = (struct atmel_smc_nand_ebi_csa_cfg *)match->data;
/*
* The at91sam9263 has 2 EBIs; if the NAND controller is under EBI1,
* add 4 to ->ebi_csa->offs.
*/
if (of_device_is_compatible(dev->parent->of_node,
"atmel,at91sam9263-ebi1"))
nc->ebi_csa->offs += 4;
return 0;
}
static int
atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
{
struct regmap_config regmap_conf = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
};
struct device *dev = nc->base.dev;
struct device_node *nand_np, *nfc_np;
void __iomem *iomem;
struct resource res;
int ret;
nand_np = dev->of_node;
nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc");
if (!nfc_np) {
dev_err(dev, "Could not find device node for sama5d3-nfc\n");
return -ENODEV;
}
nc->clk = of_clk_get(nfc_np, 0);
if (IS_ERR(nc->clk)) {
ret = PTR_ERR(nc->clk);
dev_err(dev, "Failed to retrieve HSMC clock (err = %d)\n",
ret);
goto out;
}
ret = clk_prepare_enable(nc->clk);
if (ret) {
dev_err(dev, "Failed to enable the HSMC clock (err = %d)\n",
ret);
goto out;
}
nc->irq = of_irq_get(nand_np, 0);
if (nc->irq <= 0) {
ret = nc->irq ?: -ENXIO;
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get IRQ number (err = %d)\n",
ret);
goto out;
}
ret = of_address_to_resource(nfc_np, 0, &res);
if (ret) {
dev_err(dev, "Invalid or missing NFC IO resource (err = %d)\n",
ret);
goto out;
}
iomem = devm_ioremap_resource(dev, &res);
if (IS_ERR(iomem)) {
ret = PTR_ERR(iomem);
goto out;
}
regmap_conf.name = "nfc-io";
regmap_conf.max_register = resource_size(&res) - 4;
nc->io = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
if (IS_ERR(nc->io)) {
ret = PTR_ERR(nc->io);
dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
ret);
goto out;
}
ret = of_address_to_resource(nfc_np, 1, &res);
if (ret) {
dev_err(dev, "Invalid or missing HSMC resource (err = %d)\n",
ret);
goto out;
}
iomem = devm_ioremap_resource(dev, &res);
if (IS_ERR(iomem)) {
ret = PTR_ERR(iomem);
goto out;
}
regmap_conf.name = "smc";
regmap_conf.max_register = resource_size(&res) - 4;
nc->base.smc = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
if (IS_ERR(nc->base.smc)) {
ret = PTR_ERR(nc->base.smc);
dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
ret);
goto out;
}
ret = of_address_to_resource(nfc_np, 2, &res);
if (ret) {
dev_err(dev, "Invalid or missing SRAM resource (err = %d)\n",
ret);
goto out;
}
nc->sram.virt = devm_ioremap_resource(dev, &res);
if (IS_ERR(nc->sram.virt)) {
ret = PTR_ERR(nc->sram.virt);
goto out;
}
nc->sram.dma = res.start;
out:
of_node_put(nfc_np);
return ret;
}
static int
atmel_hsmc_nand_controller_init(struct atmel_hsmc_nand_controller *nc)
{
struct device *dev = nc->base.dev;
struct device_node *np;
int ret;
np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
if (!np) {
dev_err(dev, "Missing or invalid atmel,smc property\n");
return -EINVAL;
}
nc->hsmc_layout = atmel_hsmc_get_reg_layout(np);
nc->irq = of_irq_get(np, 0);
of_node_put(np);
if (nc->irq <= 0) {
ret = nc->irq ?: -ENXIO;
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get IRQ number (err = %d)\n",
ret);
return ret;
}
np = of_parse_phandle(dev->of_node, "atmel,nfc-io", 0);
if (!np) {
dev_err(dev, "Missing or invalid atmel,nfc-io property\n");
return -EINVAL;
}
nc->io = syscon_node_to_regmap(np);
of_node_put(np);
if (IS_ERR(nc->io)) {
ret = PTR_ERR(nc->io);
dev_err(dev, "Could not get NFC IO regmap (err = %d)\n", ret);
return ret;
}
nc->sram.pool = of_gen_pool_get(nc->base.dev->of_node,
"atmel,nfc-sram", 0);
if (!nc->sram.pool) {
dev_err(nc->base.dev, "Missing SRAM\n");
return -ENOMEM;
}
nc->sram.virt = (void __iomem *)gen_pool_dma_alloc(nc->sram.pool,
ATMEL_NFC_SRAM_SIZE,
&nc->sram.dma);
if (!nc->sram.virt) {
dev_err(nc->base.dev,
"Could not allocate memory from the NFC SRAM pool\n");
return -ENOMEM;
}
return 0;
}
static int
atmel_hsmc_nand_controller_remove(struct atmel_nand_controller *nc)
{
struct atmel_hsmc_nand_controller *hsmc_nc;
int ret;
ret = atmel_nand_controller_remove_nands(nc);
if (ret)
return ret;
hsmc_nc = container_of(nc, struct atmel_hsmc_nand_controller, base);
regmap_write(hsmc_nc->base.smc, ATMEL_HSMC_NFC_CTRL,
ATMEL_HSMC_NFC_CTRL_DIS);
if (hsmc_nc->sram.pool)
gen_pool_free(hsmc_nc->sram.pool,
(unsigned long)hsmc_nc->sram.virt,
ATMEL_NFC_SRAM_SIZE);
if (hsmc_nc->clk) {
clk_disable_unprepare(hsmc_nc->clk);
clk_put(hsmc_nc->clk);
}
atmel_nand_controller_cleanup(nc);
return 0;
}
static int atmel_hsmc_nand_controller_probe(struct platform_device *pdev,
const struct atmel_nand_controller_caps *caps)
{
struct device *dev = &pdev->dev;
struct atmel_hsmc_nand_controller *nc;
int ret;
nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
if (!nc)
return -ENOMEM;
ret = atmel_nand_controller_init(&nc->base, pdev, caps);
if (ret)
return ret;
if (caps->legacy_of_bindings)
ret = atmel_hsmc_nand_controller_legacy_init(nc);
else
ret = atmel_hsmc_nand_controller_init(nc);
if (ret)
return ret;
/* Make sure all irqs are masked before registering our IRQ handler. */
regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
ret = devm_request_irq(dev, nc->irq, atmel_nfc_interrupt,
IRQF_SHARED, "nfc", nc);
if (ret) {
dev_err(dev,
"Could not get register NFC interrupt handler (err = %d)\n",
ret);
goto err;
}
/* Initial NFC configuration. */
regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CFG,
ATMEL_HSMC_NFC_CFG_DTO_MAX);
regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
ATMEL_HSMC_NFC_CTRL_EN);
ret = atmel_nand_controller_add_nands(&nc->base);
if (ret)
goto err;
return 0;
err:
atmel_hsmc_nand_controller_remove(&nc->base);
return ret;
}
static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = {
.probe = atmel_hsmc_nand_controller_probe,
.remove = atmel_hsmc_nand_controller_remove,
.ecc_init = atmel_hsmc_nand_ecc_init,
.nand_init = atmel_nand_init,
.setup_interface = atmel_hsmc_nand_setup_interface,
.exec_op = atmel_hsmc_nand_exec_op,
};
static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = {
.has_dma = true,
.ale_offs = BIT(21),
.cle_offs = BIT(22),
.ops = &atmel_hsmc_nc_ops,
};
/* Only used to parse old bindings. */
static const struct atmel_nand_controller_caps atmel_sama5_nand_caps = {
.has_dma = true,
.ale_offs = BIT(21),
.cle_offs = BIT(22),
.ops = &atmel_hsmc_nc_ops,
.legacy_of_bindings = true,
};
static int atmel_smc_nand_controller_probe(struct platform_device *pdev,
const struct atmel_nand_controller_caps *caps)
{
struct device *dev = &pdev->dev;
struct atmel_smc_nand_controller *nc;
int ret;
nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
if (!nc)
return -ENOMEM;
ret = atmel_nand_controller_init(&nc->base, pdev, caps);
if (ret)
return ret;
ret = atmel_smc_nand_controller_init(nc);
if (ret)
return ret;
return atmel_nand_controller_add_nands(&nc->base);
}
static int
atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc)
{
int ret;
ret = atmel_nand_controller_remove_nands(nc);
if (ret)
return ret;
atmel_nand_controller_cleanup(nc);
return 0;
}
/*
* The SMC reg layout of at91rm9200 is completely different, which prevents us
* from re-using atmel_smc_nand_setup_interface() for the
* ->setup_interface() hook.
* At this point, there's no support for the at91rm9200 SMC IP, so we leave
* ->setup_interface() unassigned.
*/
static const struct atmel_nand_controller_ops at91rm9200_nc_ops = {
.probe = atmel_smc_nand_controller_probe,
.remove = atmel_smc_nand_controller_remove,
.ecc_init = atmel_nand_ecc_init,
.nand_init = atmel_smc_nand_init,
.exec_op = atmel_smc_nand_exec_op,
};
static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = {
.ale_offs = BIT(21),
.cle_offs = BIT(22),
.ebi_csa_regmap_name = "atmel,matrix",
.ops = &at91rm9200_nc_ops,
};
static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
.probe = atmel_smc_nand_controller_probe,
.remove = atmel_smc_nand_controller_remove,
.ecc_init = atmel_nand_ecc_init,
.nand_init = atmel_smc_nand_init,
.setup_interface = atmel_smc_nand_setup_interface,
.exec_op = atmel_smc_nand_exec_op,
};
static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = {
.ale_offs = BIT(21),
.cle_offs = BIT(22),
.ebi_csa_regmap_name = "atmel,matrix",
.ops = &atmel_smc_nc_ops,
};
static const struct atmel_nand_controller_caps atmel_sam9261_nc_caps = {
.ale_offs = BIT(22),
.cle_offs = BIT(21),
.ebi_csa_regmap_name = "atmel,matrix",
.ops = &atmel_smc_nc_ops,
};
static const struct atmel_nand_controller_caps atmel_sam9g45_nc_caps = {
.has_dma = true,
.ale_offs = BIT(21),
.cle_offs = BIT(22),
.ebi_csa_regmap_name = "atmel,matrix",
.ops = &atmel_smc_nc_ops,
};
static const struct atmel_nand_controller_caps microchip_sam9x60_nc_caps = {
.has_dma = true,
.ale_offs = BIT(21),
.cle_offs = BIT(22),
.ebi_csa_regmap_name = "microchip,sfr",
.ops = &atmel_smc_nc_ops,
};
/* Only used to parse old bindings. */
static const struct atmel_nand_controller_caps atmel_rm9200_nand_caps = {
.ale_offs = BIT(21),
.cle_offs = BIT(22),
.ops = &atmel_smc_nc_ops,
.legacy_of_bindings = true,
};
static const struct atmel_nand_controller_caps atmel_sam9261_nand_caps = {
.ale_offs = BIT(22),
.cle_offs = BIT(21),
.ops = &atmel_smc_nc_ops,
.legacy_of_bindings = true,
};
static const struct atmel_nand_controller_caps atmel_sam9g45_nand_caps = {
.has_dma = true,
.ale_offs = BIT(21),
.cle_offs = BIT(22),
.ops = &atmel_smc_nc_ops,
.legacy_of_bindings = true,
};
static const struct of_device_id atmel_nand_controller_of_ids[] = {
{
.compatible = "atmel,at91rm9200-nand-controller",
.data = &atmel_rm9200_nc_caps,
},
{
.compatible = "atmel,at91sam9260-nand-controller",
.data = &atmel_sam9260_nc_caps,
},
{
.compatible = "atmel,at91sam9261-nand-controller",
.data = &atmel_sam9261_nc_caps,
},
{
.compatible = "atmel,at91sam9g45-nand-controller",
.data = &atmel_sam9g45_nc_caps,
},
{
.compatible = "atmel,sama5d3-nand-controller",
.data = &atmel_sama5_nc_caps,
},
{
.compatible = "microchip,sam9x60-nand-controller",
		.data = &microchip_sam9x60_nc_caps,
},
/* Support for old/deprecated bindings: */
{
.compatible = "atmel,at91rm9200-nand",
.data = &atmel_rm9200_nand_caps,
},
{
.compatible = "atmel,sama5d4-nand",
.data = &atmel_rm9200_nand_caps,
},
{
.compatible = "atmel,sama5d2-nand",
.data = &atmel_rm9200_nand_caps,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, atmel_nand_controller_of_ids);
static int atmel_nand_controller_probe(struct platform_device *pdev)
{
const struct atmel_nand_controller_caps *caps;
if (pdev->id_entry)
caps = (void *)pdev->id_entry->driver_data;
else
caps = of_device_get_match_data(&pdev->dev);
if (!caps) {
dev_err(&pdev->dev, "Could not retrieve NFC caps\n");
return -EINVAL;
}
if (caps->legacy_of_bindings) {
struct device_node *nfc_node;
u32 ale_offs = 21;
/*
* If we are parsing legacy DT props and the DT contains a
* valid NFC node, forward the request to the sama5 logic.
*/
nfc_node = of_get_compatible_child(pdev->dev.of_node,
"atmel,sama5d3-nfc");
if (nfc_node) {
caps = &atmel_sama5_nand_caps;
of_node_put(nfc_node);
}
/*
* Even if the compatible says we are dealing with an
		 * at91rm9200 controller, the atmel,nand-has-dma property
		 * specifies that this controller supports DMA, which means we
		 * are in fact dealing with an at91sam9g45+ controller.
*/
if (!caps->has_dma &&
of_property_read_bool(pdev->dev.of_node,
"atmel,nand-has-dma"))
caps = &atmel_sam9g45_nand_caps;
/*
		 * All SoCs except the at91sam9261 assign ALE to A21 and
		 * CLE to A22. If atmel,nand-addr-offset != 21, this means we're
* actually dealing with an at91sam9261 controller.
*/
of_property_read_u32(pdev->dev.of_node,
"atmel,nand-addr-offset", &ale_offs);
if (ale_offs != 21)
caps = &atmel_sam9261_nand_caps;
}
return caps->ops->probe(pdev, caps);
}
static void atmel_nand_controller_remove(struct platform_device *pdev)
{
struct atmel_nand_controller *nc = platform_get_drvdata(pdev);
WARN_ON(nc->caps->ops->remove(nc));
}
static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
{
struct atmel_nand_controller *nc = dev_get_drvdata(dev);
struct atmel_nand *nand;
if (nc->pmecc)
atmel_pmecc_reset(nc->pmecc);
list_for_each_entry(nand, &nc->chips, node) {
int i;
for (i = 0; i < nand->numcs; i++)
nand_reset(&nand->base, i);
}
return 0;
}
static SIMPLE_DEV_PM_OPS(atmel_nand_controller_pm_ops, NULL,
atmel_nand_controller_resume);
static struct platform_driver atmel_nand_controller_driver = {
.driver = {
.name = "atmel-nand-controller",
.of_match_table = atmel_nand_controller_of_ids,
.pm = &atmel_nand_controller_pm_ops,
},
.probe = atmel_nand_controller_probe,
.remove_new = atmel_nand_controller_remove,
};
module_platform_driver(atmel_nand_controller_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Boris Brezillon <[email protected]>");
MODULE_DESCRIPTION("NAND Flash Controller driver for Atmel SoCs");
MODULE_ALIAS("platform:atmel-nand-controller");
| linux-master | drivers/mtd/nand/raw/atmel/nand-controller.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2017 ATMEL
* Copyright 2017 Free Electrons
*
* Author: Boris Brezillon <[email protected]>
*
* Derived from the atmel_nand.c driver which contained the following
* copyrights:
*
* Copyright 2003 Rick Bronson
*
* Derived from drivers/mtd/nand/autcpu12.c (removed in v3.8)
* Copyright 2001 Thomas Gleixner ([email protected])
*
* Derived from drivers/mtd/spia.c (removed in v3.8)
* Copyright 2000 Steven J. Hill ([email protected])
*
* Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
* Richard Genoud ([email protected]), Adeneo Copyright 2007
*
* Derived from Das U-Boot source code
* (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
* Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
*
* Add Programmable Multibit ECC support for various AT91 SoC
* Copyright 2012 ATMEL, Hong Xu
*
* Add Nand Flash Controller support for SAMA5 SoC
* Copyright 2013 ATMEL, Josh Wu ([email protected])
*
 * The PMECC is a hardware-assisted BCH engine, which means part of the
 * ECC algorithm is left to the software. The hardware/software split
* is explained in the "PMECC Controller Functional Description" chapter in
* Atmel datasheets, and some of the functions in this file are directly
* implementing the algorithms described in the "Software Implementation"
* sub-section.
*
* TODO: it seems that the software BCH implementation in lib/bch.c is already
* providing some of the logic we are implementing here. It would be smart
* to expose the needed lib/bch.c helpers/functions and re-use them here.
*/
#include <linux/genalloc.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "pmecc.h"
/* Galois field dimension */
#define PMECC_GF_DIMENSION_13 13
#define PMECC_GF_DIMENSION_14 14
/* Primitive Polynomial used by PMECC */
#define PMECC_GF_13_PRIMITIVE_POLY 0x201b
#define PMECC_GF_14_PRIMITIVE_POLY 0x4443
#define PMECC_LOOKUP_TABLE_SIZE_512 0x2000
#define PMECC_LOOKUP_TABLE_SIZE_1024 0x4000
/* Time out value for reading PMECC status register */
#define PMECC_MAX_TIMEOUT_MS 100
/* PMECC Register Definitions */
#define ATMEL_PMECC_CFG 0x0
#define PMECC_CFG_BCH_STRENGTH(x) (x)
#define PMECC_CFG_BCH_STRENGTH_MASK GENMASK(2, 0)
#define PMECC_CFG_SECTOR512 (0 << 4)
#define PMECC_CFG_SECTOR1024 (1 << 4)
#define PMECC_CFG_NSECTORS(x) ((fls(x) - 1) << 8)
#define PMECC_CFG_READ_OP (0 << 12)
#define PMECC_CFG_WRITE_OP (1 << 12)
#define PMECC_CFG_SPARE_ENABLE BIT(16)
#define PMECC_CFG_AUTO_ENABLE BIT(20)
#define ATMEL_PMECC_SAREA 0x4
#define ATMEL_PMECC_SADDR 0x8
#define ATMEL_PMECC_EADDR 0xc
#define ATMEL_PMECC_CLK 0x10
#define PMECC_CLK_133MHZ (2 << 0)
#define ATMEL_PMECC_CTRL 0x14
#define PMECC_CTRL_RST BIT(0)
#define PMECC_CTRL_DATA BIT(1)
#define PMECC_CTRL_USER BIT(2)
#define PMECC_CTRL_ENABLE BIT(4)
#define PMECC_CTRL_DISABLE BIT(5)
#define ATMEL_PMECC_SR 0x18
#define PMECC_SR_BUSY BIT(0)
#define PMECC_SR_ENABLE BIT(4)
#define ATMEL_PMECC_IER 0x1c
#define ATMEL_PMECC_IDR 0x20
#define ATMEL_PMECC_IMR 0x24
#define ATMEL_PMECC_ISR 0x28
#define PMECC_ERROR_INT BIT(0)
#define ATMEL_PMECC_ECC(sector, n) \
((((sector) + 1) * 0x40) + (n))
#define ATMEL_PMECC_REM(sector, n) \
((((sector) + 1) * 0x40) + ((n) * 4) + 0x200)
/* PMERRLOC Register Definitions */
#define ATMEL_PMERRLOC_ELCFG 0x0
#define PMERRLOC_ELCFG_SECTOR_512 (0 << 0)
#define PMERRLOC_ELCFG_SECTOR_1024 (1 << 0)
#define PMERRLOC_ELCFG_NUM_ERRORS(n) ((n) << 16)
#define ATMEL_PMERRLOC_ELPRIM 0x4
#define ATMEL_PMERRLOC_ELEN 0x8
#define ATMEL_PMERRLOC_ELDIS 0xc
#define PMERRLOC_DISABLE BIT(0)
#define ATMEL_PMERRLOC_ELSR 0x10
#define PMERRLOC_ELSR_BUSY BIT(0)
#define ATMEL_PMERRLOC_ELIER 0x14
#define ATMEL_PMERRLOC_ELIDR 0x18
#define ATMEL_PMERRLOC_ELIMR 0x1c
#define ATMEL_PMERRLOC_ELISR 0x20
#define PMERRLOC_ERR_NUM_MASK GENMASK(12, 8)
#define PMERRLOC_CALC_DONE BIT(0)
#define ATMEL_PMERRLOC_SIGMA(x) (((x) * 0x4) + 0x28)
#define ATMEL_PMERRLOC_EL(offs, x) (((x) * 0x4) + (offs))
struct atmel_pmecc_gf_tables {
u16 *alpha_to;
u16 *index_of;
};
struct atmel_pmecc_caps {
const int *strengths;
int nstrengths;
int el_offset;
bool correct_erased_chunks;
};
struct atmel_pmecc {
struct device *dev;
const struct atmel_pmecc_caps *caps;
struct {
void __iomem *base;
void __iomem *errloc;
} regs;
struct mutex lock;
};
struct atmel_pmecc_user_conf_cache {
u32 cfg;
u32 sarea;
u32 saddr;
u32 eaddr;
};
struct atmel_pmecc_user {
struct atmel_pmecc_user_conf_cache cache;
struct atmel_pmecc *pmecc;
const struct atmel_pmecc_gf_tables *gf_tables;
int eccbytes;
s16 *partial_syn;
s16 *si;
s16 *lmu;
s16 *smu;
s32 *mu;
s32 *dmu;
s32 *delta;
u32 isr;
};
static DEFINE_MUTEX(pmecc_gf_tables_lock);
static const struct atmel_pmecc_gf_tables *pmecc_gf_tables_512;
static const struct atmel_pmecc_gf_tables *pmecc_gf_tables_1024;
static inline int deg(unsigned int poly)
{
/* polynomial degree is the most-significant bit index */
return fls(poly) - 1;
}
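/*
 * Build the Galois field lookup tables used by the software part of the
 * BCH algorithm: alpha_to[] maps an exponent i to the field element a^i,
 * while index_of[] is the reverse mapping. For 512-byte sectors the field
 * is GF(2^13) generated from the primitive polynomial 0x201b, for
 * 1024-byte sectors it is GF(2^14) generated from 0x4443 (see
 * atmel_pmecc_create_gf_tables()).
 */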
static int atmel_pmecc_build_gf_tables(int mm, unsigned int poly,
struct atmel_pmecc_gf_tables *gf_tables)
{
unsigned int i, x = 1;
const unsigned int k = BIT(deg(poly));
unsigned int nn = BIT(mm) - 1;
/* primitive polynomial must be of degree m */
if (k != (1u << mm))
return -EINVAL;
for (i = 0; i < nn; i++) {
gf_tables->alpha_to[i] = x;
gf_tables->index_of[x] = i;
if (i && (x == 1))
/* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */
return -EINVAL;
x <<= 1;
if (x & k)
x ^= poly;
}
gf_tables->alpha_to[nn] = 1;
gf_tables->index_of[0] = 0;
return 0;
}
static const struct atmel_pmecc_gf_tables *
atmel_pmecc_create_gf_tables(const struct atmel_pmecc_user_req *req)
{
struct atmel_pmecc_gf_tables *gf_tables;
unsigned int poly, degree, table_size;
int ret;
if (req->ecc.sectorsize == 512) {
degree = PMECC_GF_DIMENSION_13;
poly = PMECC_GF_13_PRIMITIVE_POLY;
table_size = PMECC_LOOKUP_TABLE_SIZE_512;
} else {
degree = PMECC_GF_DIMENSION_14;
poly = PMECC_GF_14_PRIMITIVE_POLY;
table_size = PMECC_LOOKUP_TABLE_SIZE_1024;
}
gf_tables = kzalloc(sizeof(*gf_tables) +
(2 * table_size * sizeof(u16)),
GFP_KERNEL);
if (!gf_tables)
return ERR_PTR(-ENOMEM);
gf_tables->alpha_to = (void *)(gf_tables + 1);
gf_tables->index_of = gf_tables->alpha_to + table_size;
ret = atmel_pmecc_build_gf_tables(degree, poly, gf_tables);
if (ret) {
kfree(gf_tables);
return ERR_PTR(ret);
}
return gf_tables;
}
static const struct atmel_pmecc_gf_tables *
atmel_pmecc_get_gf_tables(const struct atmel_pmecc_user_req *req)
{
const struct atmel_pmecc_gf_tables **gf_tables, *ret;
mutex_lock(&pmecc_gf_tables_lock);
if (req->ecc.sectorsize == 512)
gf_tables = &pmecc_gf_tables_512;
else
gf_tables = &pmecc_gf_tables_1024;
ret = *gf_tables;
if (!ret) {
ret = atmel_pmecc_create_gf_tables(req);
if (!IS_ERR(ret))
*gf_tables = ret;
}
mutex_unlock(&pmecc_gf_tables_lock);
return ret;
}
static int atmel_pmecc_prepare_user_req(struct atmel_pmecc *pmecc,
struct atmel_pmecc_user_req *req)
{
int i, max_eccbytes, eccbytes = 0, eccstrength = 0;
if (req->pagesize <= 0 || req->oobsize <= 0 || req->ecc.bytes <= 0)
return -EINVAL;
if (req->ecc.ooboffset >= 0 &&
req->ecc.ooboffset + req->ecc.bytes > req->oobsize)
return -EINVAL;
if (req->ecc.sectorsize == ATMEL_PMECC_SECTOR_SIZE_AUTO) {
if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH)
return -EINVAL;
if (req->pagesize > 512)
req->ecc.sectorsize = 1024;
else
req->ecc.sectorsize = 512;
}
if (req->ecc.sectorsize != 512 && req->ecc.sectorsize != 1024)
return -EINVAL;
if (req->pagesize % req->ecc.sectorsize)
return -EINVAL;
req->ecc.nsectors = req->pagesize / req->ecc.sectorsize;
max_eccbytes = req->ecc.bytes;
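	/*
	 * Pick the weakest supported strength that satisfies the request and
	 * whose ECC bytes fit in the OOB area (or, when maximizing, the
	 * strongest one that still fits). Each corrected bit costs
	 * fls(8 * sectorsize) parity bits: e.g. with 512-byte sectors,
	 * fls(4096) = 13, so a strength of 4 needs
	 * DIV_ROUND_UP(4 * 13, 8) = 7 ECC bytes per sector.
	 */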
for (i = 0; i < pmecc->caps->nstrengths; i++) {
int nbytes, strength = pmecc->caps->strengths[i];
if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH &&
strength < req->ecc.strength)
continue;
nbytes = DIV_ROUND_UP(strength * fls(8 * req->ecc.sectorsize),
8);
nbytes *= req->ecc.nsectors;
if (nbytes > max_eccbytes)
break;
eccstrength = strength;
eccbytes = nbytes;
if (req->ecc.strength != ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH)
break;
}
if (!eccstrength)
return -EINVAL;
req->ecc.bytes = eccbytes;
req->ecc.strength = eccstrength;
if (req->ecc.ooboffset < 0)
req->ecc.ooboffset = req->oobsize - eccbytes;
return 0;
}
struct atmel_pmecc_user *
atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
struct atmel_pmecc_user_req *req)
{
struct atmel_pmecc_user *user;
const struct atmel_pmecc_gf_tables *gf_tables;
int strength, size, ret;
ret = atmel_pmecc_prepare_user_req(pmecc, req);
if (ret)
return ERR_PTR(ret);
size = sizeof(*user);
size = ALIGN(size, sizeof(u16));
/* Reserve space for partial_syn, si and smu */
size += ((2 * req->ecc.strength) + 1) * sizeof(u16) *
(2 + req->ecc.strength + 2);
/* Reserve space for lmu. */
size += (req->ecc.strength + 1) * sizeof(u16);
/* Reserve space for mu, dmu and delta. */
size = ALIGN(size, sizeof(s32));
size += (req->ecc.strength + 1) * sizeof(s32) * 3;
user = kzalloc(size, GFP_KERNEL);
if (!user)
return ERR_PTR(-ENOMEM);
user->pmecc = pmecc;
user->partial_syn = (s16 *)PTR_ALIGN(user + 1, sizeof(u16));
user->si = user->partial_syn + ((2 * req->ecc.strength) + 1);
user->lmu = user->si + ((2 * req->ecc.strength) + 1);
user->smu = user->lmu + (req->ecc.strength + 1);
user->mu = (s32 *)PTR_ALIGN(user->smu +
(((2 * req->ecc.strength) + 1) *
(req->ecc.strength + 2)),
sizeof(s32));
user->dmu = user->mu + req->ecc.strength + 1;
user->delta = user->dmu + req->ecc.strength + 1;
gf_tables = atmel_pmecc_get_gf_tables(req);
if (IS_ERR(gf_tables)) {
kfree(user);
return ERR_CAST(gf_tables);
}
user->gf_tables = gf_tables;
user->eccbytes = req->ecc.bytes / req->ecc.nsectors;
for (strength = 0; strength < pmecc->caps->nstrengths; strength++) {
if (pmecc->caps->strengths[strength] == req->ecc.strength)
break;
}
user->cache.cfg = PMECC_CFG_BCH_STRENGTH(strength) |
PMECC_CFG_NSECTORS(req->ecc.nsectors);
if (req->ecc.sectorsize == 1024)
user->cache.cfg |= PMECC_CFG_SECTOR1024;
user->cache.sarea = req->oobsize - 1;
user->cache.saddr = req->ecc.ooboffset;
user->cache.eaddr = req->ecc.ooboffset + req->ecc.bytes - 1;
return user;
}
EXPORT_SYMBOL_GPL(atmel_pmecc_create_user);
void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user)
{
kfree(user);
}
EXPORT_SYMBOL_GPL(atmel_pmecc_destroy_user);
static int get_strength(struct atmel_pmecc_user *user)
{
const int *strengths = user->pmecc->caps->strengths;
return strengths[user->cache.cfg & PMECC_CFG_BCH_STRENGTH_MASK];
}
static int get_sectorsize(struct atmel_pmecc_user *user)
{
return user->cache.cfg & PMECC_CFG_SECTOR1024 ? 1024 : 512;
}
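/*
 * The hardware provides the odd partial syndromes: each 32-bit remainder
 * register packs two of them, hence the i / 2 indexing and the 16-bit
 * shift for odd i. The even syndromes are derived by squaring in
 * atmel_pmecc_substitute().
 */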
static void atmel_pmecc_gen_syndrome(struct atmel_pmecc_user *user, int sector)
{
int strength = get_strength(user);
u32 value;
int i;
/* Fill odd syndromes */
for (i = 0; i < strength; i++) {
value = readl_relaxed(user->pmecc->regs.base +
ATMEL_PMECC_REM(sector, i / 2));
if (i & 1)
value >>= 16;
user->partial_syn[(2 * i) + 1] = value;
}
}
static void atmel_pmecc_substitute(struct atmel_pmecc_user *user)
{
int degree = get_sectorsize(user) == 512 ? 13 : 14;
int cw_len = BIT(degree) - 1;
int strength = get_strength(user);
s16 *alpha_to = user->gf_tables->alpha_to;
s16 *index_of = user->gf_tables->index_of;
s16 *partial_syn = user->partial_syn;
s16 *si;
int i, j;
/*
* si[] is a table that holds the current syndrome value,
* an element of that table belongs to the field
*/
si = user->si;
memset(&si[1], 0, sizeof(s16) * ((2 * strength) - 1));
	/* Compute the 2t syndromes based on S(x) */
/* Odd syndromes */
for (i = 1; i < 2 * strength; i += 2) {
for (j = 0; j < degree; j++) {
if (partial_syn[i] & BIT(j))
si[i] = alpha_to[i * j] ^ si[i];
}
}
/* Even syndrome = (Odd syndrome) ** 2 */
for (i = 2, j = 1; j <= strength; i = ++j << 1) {
if (si[j] == 0) {
si[i] = 0;
} else {
s16 tmp;
tmp = index_of[si[j]];
tmp = (tmp * 2) % cw_len;
si[i] = alpha_to[tmp];
}
}
}
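/*
 * Compute the error locator polynomial sigma(x) from the syndromes using a
 * Berlekamp-style iterative algorithm, following the "Software
 * Implementation" flow referenced in the comment at the top of this file.
 */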
static void atmel_pmecc_get_sigma(struct atmel_pmecc_user *user)
{
s16 *lmu = user->lmu;
s16 *si = user->si;
s32 *mu = user->mu;
s32 *dmu = user->dmu;
s32 *delta = user->delta;
int degree = get_sectorsize(user) == 512 ? 13 : 14;
int cw_len = BIT(degree) - 1;
int strength = get_strength(user);
int num = 2 * strength + 1;
s16 *index_of = user->gf_tables->index_of;
s16 *alpha_to = user->gf_tables->alpha_to;
int i, j, k;
u32 dmu_0_count, tmp;
s16 *smu = user->smu;
/* index of largest delta */
int ro;
int largest;
int diff;
dmu_0_count = 0;
/* First Row */
/* Mu */
mu[0] = -1;
memset(smu, 0, sizeof(s16) * num);
smu[0] = 1;
/* discrepancy set to 1 */
dmu[0] = 1;
	/* polynomial order set to 0 */
lmu[0] = 0;
delta[0] = (mu[0] * 2 - lmu[0]) >> 1;
/* Second Row */
/* Mu */
mu[1] = 0;
/* Sigma(x) set to 1 */
memset(&smu[num], 0, sizeof(s16) * num);
smu[num] = 1;
/* discrepancy set to S1 */
dmu[1] = si[1];
	/* polynomial order set to 0 */
lmu[1] = 0;
delta[1] = (mu[1] * 2 - lmu[1]) >> 1;
/* Init the Sigma(x) last row */
memset(&smu[(strength + 1) * num], 0, sizeof(s16) * num);
for (i = 1; i <= strength; i++) {
mu[i + 1] = i << 1;
/* Begin Computing Sigma (Mu+1) and L(mu) */
/* check if discrepancy is set to 0 */
if (dmu[i] == 0) {
dmu_0_count++;
tmp = ((strength - (lmu[i] >> 1) - 1) / 2);
if ((strength - (lmu[i] >> 1) - 1) & 0x1)
tmp += 2;
else
tmp += 1;
if (dmu_0_count == tmp) {
for (j = 0; j <= (lmu[i] >> 1) + 1; j++)
smu[(strength + 1) * num + j] =
smu[i * num + j];
lmu[strength + 1] = lmu[i];
return;
}
			/* copy polynomial */
for (j = 0; j <= lmu[i] >> 1; j++)
smu[(i + 1) * num + j] = smu[i * num + j];
			/* copy previous polynomial order to the next */
lmu[i + 1] = lmu[i];
} else {
ro = 0;
largest = -1;
/* find largest delta with dmu != 0 */
for (j = 0; j < i; j++) {
if ((dmu[j]) && (delta[j] > largest)) {
largest = delta[j];
ro = j;
}
}
/* compute difference */
diff = (mu[i] - mu[ro]);
/* Compute degree of the new smu polynomial */
if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff))
lmu[i + 1] = lmu[i];
else
lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2;
/* Init smu[i+1] with 0 */
for (k = 0; k < num; k++)
smu[(i + 1) * num + k] = 0;
/* Compute smu[i+1] */
for (k = 0; k <= lmu[ro] >> 1; k++) {
s16 a, b, c;
if (!(smu[ro * num + k] && dmu[i]))
continue;
a = index_of[dmu[i]];
b = index_of[dmu[ro]];
c = index_of[smu[ro * num + k]];
tmp = a + (cw_len - b) + c;
a = alpha_to[tmp % cw_len];
smu[(i + 1) * num + (k + diff)] = a;
}
for (k = 0; k <= lmu[i] >> 1; k++)
smu[(i + 1) * num + k] ^= smu[i * num + k];
}
/* End Computing Sigma (Mu+1) and L(mu) */
/* In either case compute delta */
delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1;
/* Do not compute discrepancy for the last iteration */
if (i >= strength)
continue;
for (k = 0; k <= (lmu[i + 1] >> 1); k++) {
tmp = 2 * (i - 1);
if (k == 0) {
dmu[i + 1] = si[tmp + 3];
} else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) {
s16 a, b, c;
a = index_of[smu[(i + 1) * num + k]];
b = si[2 * (i - 1) + 3 - k];
c = index_of[b];
tmp = a + c;
tmp %= cw_len;
dmu[i + 1] = alpha_to[tmp] ^ dmu[i + 1];
}
}
}
}
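/*
 * Load the sigma(x) coefficients into the PMERRLOC engine and let it search
 * for the roots. The codeword length programmed in ELEN is the sector
 * payload plus the degree * strength parity bits. Correction is only
 * possible if the number of roots found matches the degree of sigma(x).
 */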
static int atmel_pmecc_err_location(struct atmel_pmecc_user *user)
{
int sector_size = get_sectorsize(user);
int degree = sector_size == 512 ? 13 : 14;
struct atmel_pmecc *pmecc = user->pmecc;
int strength = get_strength(user);
int ret, roots_nbr, i, err_nbr = 0;
int num = (2 * strength) + 1;
s16 *smu = user->smu;
u32 val;
writel(PMERRLOC_DISABLE, pmecc->regs.errloc + ATMEL_PMERRLOC_ELDIS);
for (i = 0; i <= user->lmu[strength + 1] >> 1; i++) {
writel_relaxed(smu[(strength + 1) * num + i],
pmecc->regs.errloc + ATMEL_PMERRLOC_SIGMA(i));
err_nbr++;
}
val = (err_nbr - 1) << 16;
if (sector_size == 1024)
val |= 1;
writel(val, pmecc->regs.errloc + ATMEL_PMERRLOC_ELCFG);
writel((sector_size * 8) + (degree * strength),
pmecc->regs.errloc + ATMEL_PMERRLOC_ELEN);
ret = readl_relaxed_poll_timeout(pmecc->regs.errloc +
ATMEL_PMERRLOC_ELISR,
val, val & PMERRLOC_CALC_DONE, 0,
PMECC_MAX_TIMEOUT_MS * 1000);
if (ret) {
dev_err(pmecc->dev,
"PMECC: Timeout to calculate error location.\n");
return ret;
}
roots_nbr = (val & PMERRLOC_ERR_NUM_MASK) >> 8;
/* Number of roots == degree of smu hence <= cap */
if (roots_nbr == user->lmu[strength + 1] >> 1)
return err_nbr - 1;
/*
	 * The number of roots does not match the degree of smu:
	 * we are unable to correct the error.
*/
return -EBADMSG;
}
int atmel_pmecc_correct_sector(struct atmel_pmecc_user *user, int sector,
void *data, void *ecc)
{
struct atmel_pmecc *pmecc = user->pmecc;
int sectorsize = get_sectorsize(user);
int eccbytes = user->eccbytes;
int i, nerrors;
if (!(user->isr & BIT(sector)))
return 0;
atmel_pmecc_gen_syndrome(user, sector);
atmel_pmecc_substitute(user);
atmel_pmecc_get_sigma(user);
nerrors = atmel_pmecc_err_location(user);
if (nerrors < 0)
return nerrors;
for (i = 0; i < nerrors; i++) {
const char *area;
int byte, bit;
u32 errpos;
u8 *ptr;
errpos = readl_relaxed(pmecc->regs.errloc +
ATMEL_PMERRLOC_EL(pmecc->caps->el_offset, i));
errpos--;
byte = errpos / 8;
bit = errpos % 8;
if (byte < sectorsize) {
ptr = data + byte;
area = "data";
} else if (byte < sectorsize + eccbytes) {
ptr = ecc + byte - sectorsize;
area = "ECC";
} else {
dev_dbg(pmecc->dev,
"Invalid errpos value (%d, max is %d)\n",
errpos, (sectorsize + eccbytes) * 8);
return -EINVAL;
}
dev_dbg(pmecc->dev,
"Bit flip in %s area, byte %d: 0x%02x -> 0x%02x\n",
area, byte, *ptr, (unsigned int)(*ptr ^ BIT(bit)));
*ptr ^= BIT(bit);
}
return nerrors;
}
EXPORT_SYMBOL_GPL(atmel_pmecc_correct_sector);
bool atmel_pmecc_correct_erased_chunks(struct atmel_pmecc_user *user)
{
return user->pmecc->caps->correct_erased_chunks;
}
EXPORT_SYMBOL_GPL(atmel_pmecc_correct_erased_chunks);
void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user,
int sector, void *ecc)
{
struct atmel_pmecc *pmecc = user->pmecc;
u8 *ptr = ecc;
int i;
for (i = 0; i < user->eccbytes; i++)
ptr[i] = readb_relaxed(pmecc->regs.base +
ATMEL_PMECC_ECC(sector, i));
}
EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes);
void atmel_pmecc_reset(struct atmel_pmecc *pmecc)
{
writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
}
EXPORT_SYMBOL_GPL(atmel_pmecc_reset);
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op)
{
struct atmel_pmecc *pmecc = user->pmecc;
u32 cfg;
if (op != NAND_ECC_READ && op != NAND_ECC_WRITE) {
dev_err(pmecc->dev, "Bad ECC operation!");
return -EINVAL;
}
mutex_lock(&user->pmecc->lock);
cfg = user->cache.cfg;
if (op == NAND_ECC_WRITE)
cfg |= PMECC_CFG_WRITE_OP;
else
cfg |= PMECC_CFG_AUTO_ENABLE;
writel(cfg, pmecc->regs.base + ATMEL_PMECC_CFG);
writel(user->cache.sarea, pmecc->regs.base + ATMEL_PMECC_SAREA);
writel(user->cache.saddr, pmecc->regs.base + ATMEL_PMECC_SADDR);
writel(user->cache.eaddr, pmecc->regs.base + ATMEL_PMECC_EADDR);
writel(PMECC_CTRL_ENABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
writel(PMECC_CTRL_DATA, pmecc->regs.base + ATMEL_PMECC_CTRL);
return 0;
}
EXPORT_SYMBOL_GPL(atmel_pmecc_enable);
void atmel_pmecc_disable(struct atmel_pmecc_user *user)
{
atmel_pmecc_reset(user->pmecc);
mutex_unlock(&user->pmecc->lock);
}
EXPORT_SYMBOL_GPL(atmel_pmecc_disable);
int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user)
{
struct atmel_pmecc *pmecc = user->pmecc;
u32 status;
int ret;
ret = readl_relaxed_poll_timeout(pmecc->regs.base +
ATMEL_PMECC_SR,
status, !(status & PMECC_SR_BUSY), 0,
PMECC_MAX_TIMEOUT_MS * 1000);
if (ret) {
dev_err(pmecc->dev,
"Timeout while waiting for PMECC ready.\n");
return ret;
}
user->isr = readl_relaxed(pmecc->regs.base + ATMEL_PMECC_ISR);
return 0;
}
EXPORT_SYMBOL_GPL(atmel_pmecc_wait_rdy);
static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev,
const struct atmel_pmecc_caps *caps,
int pmecc_res_idx, int errloc_res_idx)
{
struct device *dev = &pdev->dev;
struct atmel_pmecc *pmecc;
pmecc = devm_kzalloc(dev, sizeof(*pmecc), GFP_KERNEL);
if (!pmecc)
return ERR_PTR(-ENOMEM);
pmecc->caps = caps;
pmecc->dev = dev;
mutex_init(&pmecc->lock);
pmecc->regs.base = devm_platform_ioremap_resource(pdev, pmecc_res_idx);
if (IS_ERR(pmecc->regs.base))
return ERR_CAST(pmecc->regs.base);
pmecc->regs.errloc = devm_platform_ioremap_resource(pdev, errloc_res_idx);
if (IS_ERR(pmecc->regs.errloc))
return ERR_CAST(pmecc->regs.errloc);
/* Disable all interrupts before registering the PMECC handler. */
writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR);
atmel_pmecc_reset(pmecc);
return pmecc;
}
static void devm_atmel_pmecc_put(struct device *dev, void *res)
{
struct atmel_pmecc **pmecc = res;
put_device((*pmecc)->dev);
}
static struct atmel_pmecc *atmel_pmecc_get_by_node(struct device *userdev,
struct device_node *np)
{
struct platform_device *pdev;
struct atmel_pmecc *pmecc, **ptr;
int ret;
pdev = of_find_device_by_node(np);
if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
pmecc = platform_get_drvdata(pdev);
if (!pmecc) {
ret = -EPROBE_DEFER;
goto err_put_device;
}
ptr = devres_alloc(devm_atmel_pmecc_put, sizeof(*ptr), GFP_KERNEL);
if (!ptr) {
ret = -ENOMEM;
goto err_put_device;
}
*ptr = pmecc;
devres_add(userdev, ptr);
return pmecc;
err_put_device:
put_device(&pdev->dev);
return ERR_PTR(ret);
}
static const int atmel_pmecc_strengths[] = { 2, 4, 8, 12, 24, 32 };
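/*
 * at91sam9g45 and sama5d4 expose the first five strengths (2 to 24 bits of
 * correction per sector); sama5d2 additionally supports 32 bits.
 */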
static struct atmel_pmecc_caps at91sam9g45_caps = {
.strengths = atmel_pmecc_strengths,
.nstrengths = 5,
.el_offset = 0x8c,
};
static struct atmel_pmecc_caps sama5d4_caps = {
.strengths = atmel_pmecc_strengths,
.nstrengths = 5,
.el_offset = 0x8c,
.correct_erased_chunks = true,
};
static struct atmel_pmecc_caps sama5d2_caps = {
.strengths = atmel_pmecc_strengths,
.nstrengths = 6,
.el_offset = 0xac,
.correct_erased_chunks = true,
};
static const struct of_device_id __maybe_unused atmel_pmecc_legacy_match[] = {
{ .compatible = "atmel,sama5d4-nand", &sama5d4_caps },
{ .compatible = "atmel,sama5d2-nand", &sama5d2_caps },
{ /* sentinel */ }
};
struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
{
struct atmel_pmecc *pmecc;
struct device_node *np;
if (!userdev)
return ERR_PTR(-EINVAL);
if (!userdev->of_node)
return NULL;
np = of_parse_phandle(userdev->of_node, "ecc-engine", 0);
if (np) {
pmecc = atmel_pmecc_get_by_node(userdev, np);
of_node_put(np);
} else {
/*
* Support old DT bindings: in this case the PMECC iomem
		 * resources are directly defined in the user pdev at positions
		 * 1 and 2. Extract all relevant information from there.
*/
struct platform_device *pdev = to_platform_device(userdev);
const struct atmel_pmecc_caps *caps;
const struct of_device_id *match;
/* No PMECC engine available. */
if (!of_property_read_bool(userdev->of_node,
"atmel,has-pmecc"))
return NULL;
caps = &at91sam9g45_caps;
/* Find the caps associated to the NAND dev node. */
match = of_match_node(atmel_pmecc_legacy_match,
userdev->of_node);
if (match && match->data)
caps = match->data;
pmecc = atmel_pmecc_create(pdev, caps, 1, 2);
}
return pmecc;
}
EXPORT_SYMBOL(devm_atmel_pmecc_get);
static const struct of_device_id atmel_pmecc_match[] = {
{ .compatible = "atmel,at91sam9g45-pmecc", &at91sam9g45_caps },
{ .compatible = "atmel,sama5d4-pmecc", &sama5d4_caps },
{ .compatible = "atmel,sama5d2-pmecc", &sama5d2_caps },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_pmecc_match);
static int atmel_pmecc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct atmel_pmecc_caps *caps;
struct atmel_pmecc *pmecc;
caps = of_device_get_match_data(&pdev->dev);
if (!caps) {
dev_err(dev, "Invalid caps\n");
return -EINVAL;
}
pmecc = atmel_pmecc_create(pdev, caps, 0, 1);
if (IS_ERR(pmecc))
return PTR_ERR(pmecc);
platform_set_drvdata(pdev, pmecc);
return 0;
}
static struct platform_driver atmel_pmecc_driver = {
.driver = {
.name = "atmel-pmecc",
.of_match_table = atmel_pmecc_match,
},
.probe = atmel_pmecc_probe,
};
module_platform_driver(atmel_pmecc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Boris Brezillon <[email protected]>");
MODULE_DESCRIPTION("PMECC engine driver");
MODULE_ALIAS("platform:atmel_pmecc");
| linux-master | drivers/mtd/nand/raw/atmel/pmecc.c |
/*
* SMI (Serial Memory Controller) device driver for Serial NOR Flash on
* SPEAr platform
* The serial nor interface is largely based on m25p80.c, however the SPI
* interface has been replaced by SMI.
*
* Copyright © 2010 STMicroelectronics.
* Ashish Priyadarshi
* Shiraz Hashim <[email protected]>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/param.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/spear_smi.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/of.h>
#include <linux/of_address.h>
/* SMI clock rate */
#define SMI_MAX_CLOCK_FREQ 50000000 /* 50 MHz */
/* Max timeout to safely come out of erase or write busy conditions */
#define SMI_PROBE_TIMEOUT (HZ / 10)
#define SMI_MAX_TIME_OUT (3 * HZ)
/* timeout for command completion */
#define SMI_CMD_TIMEOUT (HZ / 10)
/* registers of smi */
#define SMI_CR1 0x0 /* SMI control register 1 */
#define SMI_CR2 0x4 /* SMI control register 2 */
#define SMI_SR 0x8 /* SMI status register */
#define SMI_TR 0xC /* SMI transmit register */
#define SMI_RR 0x10 /* SMI receive register */
/* defines for control_reg 1 */
#define BANK_EN (0xF << 0) /* enables all banks */
#define DSEL_TIME (0x6 << 4) /* Deselect time 6 + 1 SMI_CK periods */
#define SW_MODE (0x1 << 28) /* enables SW Mode */
#define WB_MODE (0x1 << 29) /* Write Burst Mode */
#define FAST_MODE (0x1 << 15) /* Fast Mode */
#define HOLD1 (0x1 << 16) /* Clock Hold period selection */
/* defines for control_reg 2 */
#define SEND (0x1 << 7) /* Send data */
#define TFIE (0x1 << 8) /* Transmission Flag Interrupt Enable */
#define WCIE (0x1 << 9) /* Write Complete Interrupt Enable */
#define RD_STATUS_REG (0x1 << 10) /* reads status reg */
#define WE (0x1 << 11) /* Write Enable */
#define TX_LEN_SHIFT 0
#define RX_LEN_SHIFT 4
#define BANK_SHIFT 12
/* defines for status register */
#define SR_WIP 0x1 /* Write in progress */
#define SR_WEL 0x2 /* Write enable latch */
#define SR_BP0 0x4 /* Block protect 0 */
#define SR_BP1 0x8 /* Block protect 1 */
#define SR_BP2 0x10 /* Block protect 2 */
#define SR_SRWD 0x80 /* SR write protect */
#define TFF 0x100 /* Transfer Finished Flag */
#define WCF 0x200 /* Write Complete Flag */
#define ERF1 0x400 /* Forbidden Write Request */
#define ERF2 0x800 /* Forbidden Access */
#define WM_SHIFT 12
/* flash opcodes */
#define OPCODE_RDID 0x9f /* Read JEDEC ID */
/* Flash Device Ids maintenance section */
/* data structure to maintain flash ids from different vendors */
struct flash_device {
char *name;
u8 erase_cmd;
u32 device_id;
u32 pagesize;
unsigned long sectorsize;
unsigned long size_in_bytes;
};
#define FLASH_ID(n, es, id, psize, ssize, size) \
{ \
.name = n, \
.erase_cmd = es, \
.device_id = id, \
.pagesize = psize, \
.sectorsize = ssize, \
.size_in_bytes = size \
}
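/*
 * The device_id field holds the three JEDEC ID bytes as they are read back
 * from the SMI receive register (first received byte, the manufacturer ID,
 * in the lowest byte). The ST m25p16, for instance, answers 0x20/0x20/0x15
 * to RDID, which shows up as 0x00152020.
 */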
static struct flash_device flash_devices[] = {
FLASH_ID("st m25p16" , 0xd8, 0x00152020, 0x100, 0x10000, 0x200000),
FLASH_ID("st m25p32" , 0xd8, 0x00162020, 0x100, 0x10000, 0x400000),
FLASH_ID("st m25p64" , 0xd8, 0x00172020, 0x100, 0x10000, 0x800000),
FLASH_ID("st m25p128" , 0xd8, 0x00182020, 0x100, 0x40000, 0x1000000),
FLASH_ID("st m25p05" , 0xd8, 0x00102020, 0x80 , 0x8000 , 0x10000),
FLASH_ID("st m25p10" , 0xd8, 0x00112020, 0x80 , 0x8000 , 0x20000),
FLASH_ID("st m25p20" , 0xd8, 0x00122020, 0x100, 0x10000, 0x40000),
FLASH_ID("st m25p40" , 0xd8, 0x00132020, 0x100, 0x10000, 0x80000),
FLASH_ID("st m25p80" , 0xd8, 0x00142020, 0x100, 0x10000, 0x100000),
FLASH_ID("st m45pe10" , 0xd8, 0x00114020, 0x100, 0x10000, 0x20000),
FLASH_ID("st m45pe20" , 0xd8, 0x00124020, 0x100, 0x10000, 0x40000),
FLASH_ID("st m45pe40" , 0xd8, 0x00134020, 0x100, 0x10000, 0x80000),
FLASH_ID("st m45pe80" , 0xd8, 0x00144020, 0x100, 0x10000, 0x100000),
FLASH_ID("sp s25fl004" , 0xd8, 0x00120201, 0x100, 0x10000, 0x80000),
FLASH_ID("sp s25fl008" , 0xd8, 0x00130201, 0x100, 0x10000, 0x100000),
FLASH_ID("sp s25fl016" , 0xd8, 0x00140201, 0x100, 0x10000, 0x200000),
FLASH_ID("sp s25fl032" , 0xd8, 0x00150201, 0x100, 0x10000, 0x400000),
FLASH_ID("sp s25fl064" , 0xd8, 0x00160201, 0x100, 0x10000, 0x800000),
FLASH_ID("atmel 25f512" , 0x52, 0x0065001F, 0x80 , 0x8000 , 0x10000),
FLASH_ID("atmel 25f1024" , 0x52, 0x0060001F, 0x100, 0x8000 , 0x20000),
FLASH_ID("atmel 25f2048" , 0x52, 0x0063001F, 0x100, 0x10000, 0x40000),
FLASH_ID("atmel 25f4096" , 0x52, 0x0064001F, 0x100, 0x10000, 0x80000),
FLASH_ID("atmel 25fs040" , 0xd7, 0x0004661F, 0x100, 0x10000, 0x80000),
FLASH_ID("mac 25l512" , 0xd8, 0x001020C2, 0x010, 0x10000, 0x10000),
FLASH_ID("mac 25l1005" , 0xd8, 0x001120C2, 0x010, 0x10000, 0x20000),
FLASH_ID("mac 25l2005" , 0xd8, 0x001220C2, 0x010, 0x10000, 0x40000),
FLASH_ID("mac 25l4005" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
FLASH_ID("mac 25l4005a" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
FLASH_ID("mac 25l8005" , 0xd8, 0x001420C2, 0x010, 0x10000, 0x100000),
FLASH_ID("mac 25l1605" , 0xd8, 0x001520C2, 0x100, 0x10000, 0x200000),
FLASH_ID("mac 25l1605a" , 0xd8, 0x001520C2, 0x010, 0x10000, 0x200000),
FLASH_ID("mac 25l3205" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
FLASH_ID("mac 25l3205a" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
FLASH_ID("mac 25l6405" , 0xd8, 0x001720C2, 0x100, 0x10000, 0x800000),
};
/* Define spear specific structures */
struct spear_snor_flash;
/**
* struct spear_smi - Structure for SMI Device
*
* @clk: functional clock
* @status: current status register of SMI.
* @clk_rate: functional clock rate of SMI (default: SMI_MAX_CLOCK_FREQ)
* @lock: lock to prevent parallel access of SMI.
* @io_base: base address for registers of SMI.
* @pdev: platform device
* @cmd_complete: queue to wait for command completion of NOR-flash.
* @num_flashes: number of flashes actually present on board.
* @flash: separate structure for each Serial NOR-flash attached to SMI.
*/
struct spear_smi {
struct clk *clk;
u32 status;
unsigned long clk_rate;
struct mutex lock;
void __iomem *io_base;
struct platform_device *pdev;
wait_queue_head_t cmd_complete;
u32 num_flashes;
struct spear_snor_flash *flash[MAX_NUM_FLASH_CHIP];
};
/**
* struct spear_snor_flash - Structure for Serial NOR Flash
*
* @bank: Bank number(0, 1, 2, 3) for each NOR-flash.
* @dev_id: Device ID of NOR-flash.
* @lock: lock to manage flash read, write and erase operations
* @mtd: MTD info for each NOR-flash.
* @num_parts: Total number of partition in each bank of NOR-flash.
* @parts: Partition info for each bank of NOR-flash.
* @page_size: Page size of NOR-flash.
* @base_addr: Base address of NOR-flash.
* @erase_cmd: erase command may vary on different flash types
* @fast_mode: flash supports read in fast mode
*/
struct spear_snor_flash {
u32 bank;
u32 dev_id;
struct mutex lock;
struct mtd_info mtd;
u32 num_parts;
struct mtd_partition *parts;
u32 page_size;
void __iomem *base_addr;
u8 erase_cmd;
u8 fast_mode;
};
static inline struct spear_snor_flash *get_flash_data(struct mtd_info *mtd)
{
return container_of(mtd, struct spear_snor_flash, mtd);
}
/**
* spear_smi_read_sr - Read status register of flash through SMI
* @dev: structure of SMI information.
* @bank: bank to which flash is connected
*
* This routine will return the status register of the flash chip present at the
* given bank.
*/
static int spear_smi_read_sr(struct spear_smi *dev, u32 bank)
{
int ret;
u32 ctrlreg1;
mutex_lock(&dev->lock);
dev->status = 0; /* Will be set in interrupt handler */
ctrlreg1 = readl(dev->io_base + SMI_CR1);
/* program smi in hw mode */
writel(ctrlreg1 & ~(SW_MODE | WB_MODE), dev->io_base + SMI_CR1);
	/* perform an RSR (read status register) instruction in hw mode */
writel((bank << BANK_SHIFT) | RD_STATUS_REG | TFIE,
dev->io_base + SMI_CR2);
/* wait for tff */
ret = wait_event_interruptible_timeout(dev->cmd_complete,
dev->status & TFF, SMI_CMD_TIMEOUT);
/* copy dev->status (lower 16 bits) in order to release lock */
if (ret > 0)
ret = dev->status & 0xffff;
else if (ret == 0)
ret = -ETIMEDOUT;
/* restore the ctrl regs state */
writel(ctrlreg1, dev->io_base + SMI_CR1);
writel(0, dev->io_base + SMI_CR2);
mutex_unlock(&dev->lock);
return ret;
}
/**
* spear_smi_wait_till_ready - wait till flash is ready
* @dev: structure of SMI information.
* @bank: flash corresponding to this bank
* @timeout: timeout for busy wait condition
*
* This routine checks for WIP (write in progress) bit in Status register
* If successful the routine returns 0 else -EBUSY
*/
static int spear_smi_wait_till_ready(struct spear_smi *dev, u32 bank,
unsigned long timeout)
{
unsigned long finish;
int status;
finish = jiffies + timeout;
do {
status = spear_smi_read_sr(dev, bank);
if (status < 0) {
if (status == -ETIMEDOUT)
continue; /* try till finish */
return status;
} else if (!(status & SR_WIP)) {
return 0;
}
cond_resched();
} while (!time_after_eq(jiffies, finish));
dev_err(&dev->pdev->dev, "smi controller is busy, timeout\n");
return -EBUSY;
}
/**
* spear_smi_int_handler - SMI Interrupt Handler.
* @irq: irq number
* @dev_id: structure of SMI device, embedded in dev_id.
*
* The handler clears all interrupt conditions and records the status in
* dev->status which is used by the driver later.
*/
static irqreturn_t spear_smi_int_handler(int irq, void *dev_id)
{
u32 status = 0;
struct spear_smi *dev = dev_id;
status = readl(dev->io_base + SMI_SR);
if (unlikely(!status))
return IRQ_NONE;
/* clear all interrupt conditions */
writel(0, dev->io_base + SMI_SR);
/* copy the status register in dev->status */
dev->status |= status;
/* send the completion */
wake_up_interruptible(&dev->cmd_complete);
return IRQ_HANDLED;
}
/**
* spear_smi_hw_init - initializes the smi controller.
* @dev: structure of smi device
*
 * This routine initializes the SMI controller with the default values.
*/
static void spear_smi_hw_init(struct spear_smi *dev)
{
unsigned long rate = 0;
u32 prescale = 0;
u32 val;
rate = clk_get_rate(dev->clk);
/* functional clock of smi */
prescale = DIV_ROUND_UP(rate, dev->clk_rate);
/*
* setting the standard values, fast mode, prescaler for
* SMI_MAX_CLOCK_FREQ (50MHz) operation and bank enable
*/
val = HOLD1 | BANK_EN | DSEL_TIME | (prescale << 8);
mutex_lock(&dev->lock);
/* clear all interrupt conditions */
writel(0, dev->io_base + SMI_SR);
writel(val, dev->io_base + SMI_CR1);
mutex_unlock(&dev->lock);
}
/**
* get_flash_index - match chip id from a flash list.
* @flash_id: a valid nor flash chip id obtained from board.
*
 * Try to validate the chip id by matching it against a list; if not found,
 * simply return a negative value. On success, return the index into the
 * flash devices array.
*/
static int get_flash_index(u32 flash_id)
{
int index;
/* Matches chip-id to entire list of 'serial-nor flash' ids */
for (index = 0; index < ARRAY_SIZE(flash_devices); index++) {
if (flash_devices[index].device_id == flash_id)
return index;
}
/* Memory chip is not listed and not supported */
return -ENODEV;
}
/**
* spear_smi_write_enable - Enable the flash to do write operation
* @dev: structure of SMI device
* @bank: enable write for flash connected to this bank
*
* Set write enable latch with Write Enable command.
* Returns 0 on success.
*/
static int spear_smi_write_enable(struct spear_smi *dev, u32 bank)
{
int ret;
u32 ctrlreg1;
mutex_lock(&dev->lock);
dev->status = 0; /* Will be set in interrupt handler */
ctrlreg1 = readl(dev->io_base + SMI_CR1);
/* program smi in h/w mode */
writel(ctrlreg1 & ~SW_MODE, dev->io_base + SMI_CR1);
	/* give the flash the write enable command */
writel((bank << BANK_SHIFT) | WE | TFIE, dev->io_base + SMI_CR2);
ret = wait_event_interruptible_timeout(dev->cmd_complete,
dev->status & TFF, SMI_CMD_TIMEOUT);
/* restore the ctrl regs state */
writel(ctrlreg1, dev->io_base + SMI_CR1);
writel(0, dev->io_base + SMI_CR2);
if (ret == 0) {
ret = -EIO;
dev_err(&dev->pdev->dev,
"smi controller failed on write enable\n");
} else if (ret > 0) {
/* check whether write mode status is set for required bank */
if (dev->status & (1 << (bank + WM_SHIFT)))
ret = 0;
else {
dev_err(&dev->pdev->dev, "couldn't enable write\n");
ret = -EIO;
}
}
mutex_unlock(&dev->lock);
return ret;
}
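/*
 * Build the 4-byte sector erase command (opcode followed by the 24-bit
 * sector address, MSB first) in memory order, so that it can be passed to
 * spear_smi_erase_sector() and written to the transmit register as a
 * single word.
 */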
static inline u32
get_sector_erase_cmd(struct spear_snor_flash *flash, u32 offset)
{
u32 cmd;
u8 *x = (u8 *)&cmd;
x[0] = flash->erase_cmd;
x[1] = offset >> 16;
x[2] = offset >> 8;
x[3] = offset;
return cmd;
}
/**
* spear_smi_erase_sector - erase one sector of flash
* @dev: structure of SMI information
 * @command: erase command to be sent
 * @bank: bank to which this command needs to be sent
* @bytes: size of command
*
 * Erase one sector of flash memory. The address encoded in @command may be
 * any address within the sector to be erased.
* Returns 0 if successful, non-zero otherwise.
*/
static int spear_smi_erase_sector(struct spear_smi *dev,
u32 bank, u32 command, u32 bytes)
{
u32 ctrlreg1 = 0;
int ret;
ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
if (ret)
return ret;
ret = spear_smi_write_enable(dev, bank);
if (ret)
return ret;
mutex_lock(&dev->lock);
ctrlreg1 = readl(dev->io_base + SMI_CR1);
writel((ctrlreg1 | SW_MODE) & ~WB_MODE, dev->io_base + SMI_CR1);
/* send command in sw mode */
writel(command, dev->io_base + SMI_TR);
writel((bank << BANK_SHIFT) | SEND | TFIE | (bytes << TX_LEN_SHIFT),
dev->io_base + SMI_CR2);
ret = wait_event_interruptible_timeout(dev->cmd_complete,
dev->status & TFF, SMI_CMD_TIMEOUT);
if (ret == 0) {
ret = -EIO;
dev_err(&dev->pdev->dev, "sector erase failed\n");
} else if (ret > 0)
ret = 0; /* success */
/* restore ctrl regs */
writel(ctrlreg1, dev->io_base + SMI_CR1);
writel(0, dev->io_base + SMI_CR2);
mutex_unlock(&dev->lock);
return ret;
}
/**
* spear_mtd_erase - perform flash erase operation as requested by user
* @mtd: Provides the memory characteristics
* @e_info: Provides the erase information
*
* Erase an address range on the flash chip. The address range may extend
 * over one or more erase sectors. Return an error if there is a problem
 * erasing.
*/
static int spear_mtd_erase(struct mtd_info *mtd, struct erase_info *e_info)
{
struct spear_snor_flash *flash = get_flash_data(mtd);
struct spear_smi *dev = mtd->priv;
u32 addr, command, bank;
int len, ret;
if (!flash || !dev)
return -ENODEV;
bank = flash->bank;
if (bank > dev->num_flashes - 1) {
dev_err(&dev->pdev->dev, "Invalid Bank Num");
return -EINVAL;
}
addr = e_info->addr;
len = e_info->len;
mutex_lock(&flash->lock);
/* now erase sectors in loop */
while (len) {
command = get_sector_erase_cmd(flash, addr);
/* preparing the command for flash */
ret = spear_smi_erase_sector(dev, bank, command, 4);
if (ret) {
mutex_unlock(&flash->lock);
return ret;
}
addr += mtd->erasesize;
len -= mtd->erasesize;
}
mutex_unlock(&flash->lock);
return 0;
}
/**
* spear_mtd_read - performs flash read operation as requested by the user
* @mtd: MTD information of the memory bank
* @from: Address from which to start read
* @len: Number of bytes to be read
* @retlen: Fills the Number of bytes actually read
* @buf: Fills this after reading
*
* Read an address range from the flash chip. The address range
* may be any size provided it is within the physical boundaries.
* Returns 0 on success, non zero otherwise
*/
static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u8 *buf)
{
struct spear_snor_flash *flash = get_flash_data(mtd);
struct spear_smi *dev = mtd->priv;
void __iomem *src;
u32 ctrlreg1, val;
int ret;
if (!flash || !dev)
return -ENODEV;
if (flash->bank > dev->num_flashes - 1) {
dev_err(&dev->pdev->dev, "Invalid Bank Num");
return -EINVAL;
}
/* select address as per bank number */
src = flash->base_addr + from;
mutex_lock(&flash->lock);
/* wait till previous write/erase is done. */
ret = spear_smi_wait_till_ready(dev, flash->bank, SMI_MAX_TIME_OUT);
if (ret) {
mutex_unlock(&flash->lock);
return ret;
}
mutex_lock(&dev->lock);
/* put smi in hw mode not wbt mode */
ctrlreg1 = val = readl(dev->io_base + SMI_CR1);
val &= ~(SW_MODE | WB_MODE);
if (flash->fast_mode)
val |= FAST_MODE;
writel(val, dev->io_base + SMI_CR1);
memcpy_fromio(buf, src, len);
/* restore ctrl reg1 */
writel(ctrlreg1, dev->io_base + SMI_CR1);
mutex_unlock(&dev->lock);
*retlen = len;
mutex_unlock(&flash->lock);
return 0;
}
/*
* The purpose of this function is to ensure a memcpy_toio() with byte writes
 * only. Its structure is inspired by the ARM implementation of _memcpy_toio(),
 * which also does single-byte writes but cannot be used here as it is just an
 * implementation detail and not part of the API, not to mention the comment
 * stating that _memcpy_toio() should be optimized.
*/
static void spear_smi_memcpy_toio_b(volatile void __iomem *dest,
const void *src, size_t len)
{
const unsigned char *from = src;
while (len) {
len--;
writeb(*from, dest);
from++;
dest++;
}
}
static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
void __iomem *dest, const void *src, size_t len)
{
int ret;
u32 ctrlreg1;
/* wait until finished previous write command. */
ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
if (ret)
return ret;
/* put smi in write enable */
ret = spear_smi_write_enable(dev, bank);
if (ret)
return ret;
/* put smi in hw, write burst mode */
mutex_lock(&dev->lock);
ctrlreg1 = readl(dev->io_base + SMI_CR1);
writel((ctrlreg1 | WB_MODE) & ~SW_MODE, dev->io_base + SMI_CR1);
/*
	 * In Write Burst mode (WB_MODE), the spec states that writes must be:
* - incremental
* - of the same size
* The ARM implementation of memcpy_toio() will optimize the number of
* I/O by using as much 4-byte writes as possible, surrounded by
* 2-byte/1-byte access if:
* - the destination is not 4-byte aligned
* - the length is not a multiple of 4-byte.
	 * Avoid this alternation of write access sizes by using our own 'byte
* access' helper if at least one of the two conditions above is true.
*/
if (IS_ALIGNED(len, sizeof(u32)) &&
IS_ALIGNED((uintptr_t)dest, sizeof(u32)))
memcpy_toio(dest, src, len);
else
spear_smi_memcpy_toio_b(dest, src, len);
writel(ctrlreg1, dev->io_base + SMI_CR1);
mutex_unlock(&dev->lock);
return 0;
}
/**
* spear_mtd_write - performs write operation as requested by the user.
* @mtd: MTD information of the memory bank.
* @to: Address to write.
* @len: Number of bytes to be written.
 * @retlen: Number of bytes actually written.
* @buf: Buffer from which the data to be taken.
*
* Write an address range to the flash chip. Data must be written in
* flash_page_size chunks. The address range may be any size provided
* it is within the physical boundaries.
* Returns 0 on success, non zero otherwise
*/
static int spear_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u8 *buf)
{
struct spear_snor_flash *flash = get_flash_data(mtd);
struct spear_smi *dev = mtd->priv;
void __iomem *dest;
u32 page_offset, page_size;
int ret;
if (!flash || !dev)
return -ENODEV;
if (flash->bank > dev->num_flashes - 1) {
dev_err(&dev->pdev->dev, "Invalid Bank Num");
return -EINVAL;
}
/* select address as per bank number */
dest = flash->base_addr + to;
mutex_lock(&flash->lock);
page_offset = (u32)to % flash->page_size;
/* do if all the bytes fit onto one page */
if (page_offset + len <= flash->page_size) {
ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf, len);
if (!ret)
*retlen += len;
} else {
u32 i;
/* the size of data remaining on the first page */
page_size = flash->page_size - page_offset;
ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf,
page_size);
if (ret)
goto err_write;
else
*retlen += page_size;
/* write everything in pagesize chunks */
for (i = page_size; i < len; i += page_size) {
page_size = len - i;
if (page_size > flash->page_size)
page_size = flash->page_size;
ret = spear_smi_cpy_toio(dev, flash->bank, dest + i,
buf + i, page_size);
if (ret)
break;
else
*retlen += page_size;
}
}
err_write:
mutex_unlock(&flash->lock);
return ret;
}
/**
* spear_smi_probe_flash - Detects the NOR Flash chip.
* @dev: structure of SMI information.
* @bank: bank on which flash must be probed
*
* This routine will check whether there exists a flash chip on a given memory
* bank ID.
 * Returns the index of the probed flash in the flash_devices array.
*/
static int spear_smi_probe_flash(struct spear_smi *dev, u32 bank)
{
int ret;
u32 val = 0;
ret = spear_smi_wait_till_ready(dev, bank, SMI_PROBE_TIMEOUT);
if (ret)
return ret;
mutex_lock(&dev->lock);
dev->status = 0; /* Will be set in interrupt handler */
/* put smi in sw mode */
val = readl(dev->io_base + SMI_CR1);
writel(val | SW_MODE, dev->io_base + SMI_CR1);
/* send readid command in sw mode */
writel(OPCODE_RDID, dev->io_base + SMI_TR);
val = (bank << BANK_SHIFT) | SEND | (1 << TX_LEN_SHIFT) |
(3 << RX_LEN_SHIFT) | TFIE;
writel(val, dev->io_base + SMI_CR2);
/* wait for TFF */
ret = wait_event_interruptible_timeout(dev->cmd_complete,
dev->status & TFF, SMI_CMD_TIMEOUT);
if (ret <= 0) {
ret = -ENODEV;
goto err_probe;
}
/* get memory chip id */
val = readl(dev->io_base + SMI_RR);
val &= 0x00ffffff;
ret = get_flash_index(val);
err_probe:
/* clear sw mode */
val = readl(dev->io_base + SMI_CR1);
writel(val & ~SW_MODE, dev->io_base + SMI_CR1);
mutex_unlock(&dev->lock);
return ret;
}
#ifdef CONFIG_OF
static int spear_smi_probe_config_dt(struct platform_device *pdev,
struct device_node *np)
{
struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *pp;
const __be32 *addr;
u32 val;
int len;
int i = 0;
if (!np)
return -ENODEV;
of_property_read_u32(np, "clock-rate", &val);
pdata->clk_rate = val;
pdata->board_flash_info = devm_kzalloc(&pdev->dev,
sizeof(*pdata->board_flash_info),
GFP_KERNEL);
if (!pdata->board_flash_info)
return -ENOMEM;
/* Fill structs for each subnode (flash device) */
for_each_child_of_node(np, pp) {
pdata->np[i] = pp;
/* Read base-addr and size from DT */
addr = of_get_property(pp, "reg", &len);
pdata->board_flash_info->mem_base = be32_to_cpup(&addr[0]);
pdata->board_flash_info->size = be32_to_cpup(&addr[1]);
pdata->board_flash_info->fast_mode =
of_property_read_bool(pp, "st,smi-fast-mode");
i++;
}
pdata->num_flashes = i;
return 0;
}
#else
static int spear_smi_probe_config_dt(struct platform_device *pdev,
struct device_node *np)
{
return -ENOSYS;
}
#endif
static int spear_smi_setup_banks(struct platform_device *pdev,
u32 bank, struct device_node *np)
{
struct spear_smi *dev = platform_get_drvdata(pdev);
struct spear_smi_flash_info *flash_info;
struct spear_smi_plat_data *pdata;
struct spear_snor_flash *flash;
struct mtd_partition *parts = NULL;
int count = 0;
int flash_index;
int ret = 0;
pdata = dev_get_platdata(&pdev->dev);
if (bank > pdata->num_flashes - 1)
return -EINVAL;
flash_info = &pdata->board_flash_info[bank];
if (!flash_info)
return -ENODEV;
flash = devm_kzalloc(&pdev->dev, sizeof(*flash), GFP_ATOMIC);
if (!flash)
return -ENOMEM;
flash->bank = bank;
flash->fast_mode = flash_info->fast_mode ? 1 : 0;
mutex_init(&flash->lock);
/* verify whether nor flash is really present on board */
flash_index = spear_smi_probe_flash(dev, bank);
if (flash_index < 0) {
dev_info(&dev->pdev->dev, "smi-nor%d not found\n", bank);
return flash_index;
}
/* map the memory for nor flash chip */
flash->base_addr = devm_ioremap(&pdev->dev, flash_info->mem_base,
flash_info->size);
if (!flash->base_addr)
return -EIO;
dev->flash[bank] = flash;
flash->mtd.priv = dev;
if (flash_info->name)
flash->mtd.name = flash_info->name;
else
flash->mtd.name = flash_devices[flash_index].name;
flash->mtd.dev.parent = &pdev->dev;
mtd_set_of_node(&flash->mtd, np);
flash->mtd.type = MTD_NORFLASH;
flash->mtd.writesize = 1;
flash->mtd.flags = MTD_CAP_NORFLASH;
flash->mtd.size = flash_info->size;
flash->mtd.erasesize = flash_devices[flash_index].sectorsize;
flash->page_size = flash_devices[flash_index].pagesize;
flash->mtd.writebufsize = flash->page_size;
flash->erase_cmd = flash_devices[flash_index].erase_cmd;
flash->mtd._erase = spear_mtd_erase;
flash->mtd._read = spear_mtd_read;
flash->mtd._write = spear_mtd_write;
flash->dev_id = flash_devices[flash_index].device_id;
dev_info(&dev->pdev->dev, "mtd .name=%s .size=%llx(%lluM)\n",
flash->mtd.name, flash->mtd.size,
flash->mtd.size / (1024 * 1024));
dev_info(&dev->pdev->dev, ".erasesize = 0x%x(%uK)\n",
flash->mtd.erasesize, flash->mtd.erasesize / 1024);
#ifndef CONFIG_OF
if (flash_info->partitions) {
parts = flash_info->partitions;
count = flash_info->nr_partitions;
}
#endif
ret = mtd_device_register(&flash->mtd, parts, count);
if (ret) {
dev_err(&dev->pdev->dev, "Err MTD partition=%d\n", ret);
return ret;
}
return 0;
}
/**
* spear_smi_probe - Entry routine
* @pdev: platform device structure
*
* This is the first routine which gets invoked during booting and does all
* initialization/allocation work. The routine looks for available memory banks,
 * and does the proper init for each one found.
* Returns 0 on success, non zero otherwise
*/
static int spear_smi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct spear_smi_plat_data *pdata = NULL;
struct spear_smi *dev;
int irq, ret = 0;
int i;
if (np) {
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
ret = -ENOMEM;
goto err;
}
pdev->dev.platform_data = pdata;
ret = spear_smi_probe_config_dt(pdev, np);
if (ret) {
ret = -ENODEV;
dev_err(&pdev->dev, "no platform data\n");
goto err;
}
} else {
pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
ret = -ENODEV;
dev_err(&pdev->dev, "no platform data\n");
goto err;
}
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENODEV;
goto err;
}
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev) {
ret = -ENOMEM;
goto err;
}
dev->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->io_base)) {
ret = PTR_ERR(dev->io_base);
goto err;
}
dev->pdev = pdev;
dev->clk_rate = pdata->clk_rate;
if (dev->clk_rate > SMI_MAX_CLOCK_FREQ)
dev->clk_rate = SMI_MAX_CLOCK_FREQ;
dev->num_flashes = pdata->num_flashes;
if (dev->num_flashes > MAX_NUM_FLASH_CHIP) {
dev_err(&pdev->dev, "exceeding max number of flashes\n");
dev->num_flashes = MAX_NUM_FLASH_CHIP;
}
dev->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(dev->clk)) {
ret = PTR_ERR(dev->clk);
goto err;
}
ret = devm_request_irq(&pdev->dev, irq, spear_smi_int_handler, 0,
pdev->name, dev);
if (ret) {
dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n");
goto err;
}
mutex_init(&dev->lock);
init_waitqueue_head(&dev->cmd_complete);
spear_smi_hw_init(dev);
platform_set_drvdata(pdev, dev);
/* loop for each serial nor-flash which is connected to smi */
for (i = 0; i < dev->num_flashes; i++) {
ret = spear_smi_setup_banks(pdev, i, pdata->np[i]);
if (ret) {
dev_err(&dev->pdev->dev, "bank setup failed\n");
goto err;
}
}
return 0;
err:
return ret;
}
/**
* spear_smi_remove - Exit routine
* @pdev: platform device structure
*
* free all allocations and delete the partitions.
*/
static int spear_smi_remove(struct platform_device *pdev)
{
struct spear_smi *dev;
struct spear_snor_flash *flash;
int i;
dev = platform_get_drvdata(pdev);
/* clean up for all nor flash */
for (i = 0; i < dev->num_flashes; i++) {
flash = dev->flash[i];
if (!flash)
continue;
/* clean up mtd stuff */
WARN_ON(mtd_device_unregister(&flash->mtd));
}
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int spear_smi_suspend(struct device *dev)
{
struct spear_smi *sdev = dev_get_drvdata(dev);
if (sdev && sdev->clk)
clk_disable_unprepare(sdev->clk);
return 0;
}
static int spear_smi_resume(struct device *dev)
{
struct spear_smi *sdev = dev_get_drvdata(dev);
int ret = -EPERM;
if (sdev && sdev->clk)
ret = clk_prepare_enable(sdev->clk);
if (!ret)
spear_smi_hw_init(sdev);
return ret;
}
#endif
static SIMPLE_DEV_PM_OPS(spear_smi_pm_ops, spear_smi_suspend, spear_smi_resume);
#ifdef CONFIG_OF
static const struct of_device_id spear_smi_id_table[] = {
{ .compatible = "st,spear600-smi" },
{}
};
MODULE_DEVICE_TABLE(of, spear_smi_id_table);
#endif
static struct platform_driver spear_smi_driver = {
.driver = {
.name = "smi",
.bus = &platform_bus_type,
.of_match_table = of_match_ptr(spear_smi_id_table),
.pm = &spear_smi_pm_ops,
},
.probe = spear_smi_probe,
.remove = spear_smi_remove,
};
module_platform_driver(spear_smi_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <[email protected]>");
MODULE_DESCRIPTION("MTD SMI driver for serial nor flash chips");
| linux-master | drivers/mtd/devices/spear_smi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* st_spi_fsm.c - ST Fast Sequence Mode (FSM) Serial Flash Controller
*
* Author: Angus Clark <[email protected]>
*
* Copyright (C) 2010-2014 STMicroelectronics Limited
*
* JEDEC probe based on drivers/mtd/devices/m25p80.c
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk.h>
#include "serial_flash_cmds.h"
/*
* FSM SPI Controller Registers
*/
#define SPI_CLOCKDIV 0x0010
#define SPI_MODESELECT 0x0018
#define SPI_CONFIGDATA 0x0020
#define SPI_STA_MODE_CHANGE 0x0028
#define SPI_FAST_SEQ_TRANSFER_SIZE 0x0100
#define SPI_FAST_SEQ_ADD1 0x0104
#define SPI_FAST_SEQ_ADD2 0x0108
#define SPI_FAST_SEQ_ADD_CFG 0x010c
#define SPI_FAST_SEQ_OPC1 0x0110
#define SPI_FAST_SEQ_OPC2 0x0114
#define SPI_FAST_SEQ_OPC3 0x0118
#define SPI_FAST_SEQ_OPC4 0x011c
#define SPI_FAST_SEQ_OPC5 0x0120
#define SPI_MODE_BITS 0x0124
#define SPI_DUMMY_BITS 0x0128
#define SPI_FAST_SEQ_FLASH_STA_DATA 0x012c
#define SPI_FAST_SEQ_1 0x0130
#define SPI_FAST_SEQ_2 0x0134
#define SPI_FAST_SEQ_3 0x0138
#define SPI_FAST_SEQ_4 0x013c
#define SPI_FAST_SEQ_CFG 0x0140
#define SPI_FAST_SEQ_STA 0x0144
#define SPI_QUAD_BOOT_SEQ_INIT_1 0x0148
#define SPI_QUAD_BOOT_SEQ_INIT_2 0x014c
#define SPI_QUAD_BOOT_READ_SEQ_1 0x0150
#define SPI_QUAD_BOOT_READ_SEQ_2 0x0154
#define SPI_PROGRAM_ERASE_TIME 0x0158
#define SPI_MULT_PAGE_REPEAT_SEQ_1 0x015c
#define SPI_MULT_PAGE_REPEAT_SEQ_2 0x0160
#define SPI_STATUS_WR_TIME_REG 0x0164
#define SPI_FAST_SEQ_DATA_REG 0x0300
/*
* Register: SPI_MODESELECT
*/
#define SPI_MODESELECT_CONTIG 0x01
#define SPI_MODESELECT_FASTREAD 0x02
#define SPI_MODESELECT_DUALIO 0x04
#define SPI_MODESELECT_FSM 0x08
#define SPI_MODESELECT_QUADBOOT 0x10
/*
* Register: SPI_CONFIGDATA
*/
#define SPI_CFG_DEVICE_ST 0x1
#define SPI_CFG_DEVICE_ATMEL 0x4
#define SPI_CFG_MIN_CS_HIGH(x) (((x) & 0xfff) << 4)
#define SPI_CFG_CS_SETUPHOLD(x) (((x) & 0xff) << 16)
#define SPI_CFG_DATA_HOLD(x) (((x) & 0xff) << 24)
#define SPI_CFG_DEFAULT_MIN_CS_HIGH SPI_CFG_MIN_CS_HIGH(0x0AA)
#define SPI_CFG_DEFAULT_CS_SETUPHOLD SPI_CFG_CS_SETUPHOLD(0xA0)
#define SPI_CFG_DEFAULT_DATA_HOLD SPI_CFG_DATA_HOLD(0x00)
/*
* Register: SPI_FAST_SEQ_TRANSFER_SIZE
*/
#define TRANSFER_SIZE(x) ((x) * 8)
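/*
 * Note: the transfer size written to the controller is apparently expressed in
 * bits, hence the byte count is multiplied by 8 here; e.g. TRANSFER_SIZE(4) is
 * 32, i.e. one 32-bit FIFO word.
 */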
/*
* Register: SPI_FAST_SEQ_ADD_CFG
*/
#define ADR_CFG_CYCLES_ADD1(x) ((x) << 0)
#define ADR_CFG_PADS_1_ADD1 (0x0 << 6)
#define ADR_CFG_PADS_2_ADD1 (0x1 << 6)
#define ADR_CFG_PADS_4_ADD1 (0x3 << 6)
#define ADR_CFG_CSDEASSERT_ADD1 (1 << 8)
#define ADR_CFG_CYCLES_ADD2(x) ((x) << (0+16))
#define ADR_CFG_PADS_1_ADD2 (0x0 << (6+16))
#define ADR_CFG_PADS_2_ADD2 (0x1 << (6+16))
#define ADR_CFG_PADS_4_ADD2 (0x3 << (6+16))
#define ADR_CFG_CSDEASSERT_ADD2 (1 << (8+16))
/*
* Register: SPI_FAST_SEQ_n
*/
#define SEQ_OPC_OPCODE(x) ((x) << 0)
#define SEQ_OPC_CYCLES(x) ((x) << 8)
#define SEQ_OPC_PADS_1 (0x0 << 14)
#define SEQ_OPC_PADS_2 (0x1 << 14)
#define SEQ_OPC_PADS_4 (0x3 << 14)
#define SEQ_OPC_CSDEASSERT (1 << 16)
/*
* Register: SPI_FAST_SEQ_CFG
*/
#define SEQ_CFG_STARTSEQ (1 << 0)
#define SEQ_CFG_SWRESET (1 << 5)
#define SEQ_CFG_CSDEASSERT (1 << 6)
#define SEQ_CFG_READNOTWRITE (1 << 7)
#define SEQ_CFG_ERASE (1 << 8)
#define SEQ_CFG_PADS_1 (0x0 << 16)
#define SEQ_CFG_PADS_2 (0x1 << 16)
#define SEQ_CFG_PADS_4 (0x3 << 16)
/*
* Register: SPI_MODE_BITS
*/
#define MODE_DATA(x) (x & 0xff)
#define MODE_CYCLES(x) ((x & 0x3f) << 16)
#define MODE_PADS_1 (0x0 << 22)
#define MODE_PADS_2 (0x1 << 22)
#define MODE_PADS_4 (0x3 << 22)
#define DUMMY_CSDEASSERT (1 << 24)
/*
* Register: SPI_DUMMY_BITS
*/
#define DUMMY_CYCLES(x) ((x & 0x3f) << 16)
#define DUMMY_PADS_1 (0x0 << 22)
#define DUMMY_PADS_2 (0x1 << 22)
#define DUMMY_PADS_4 (0x3 << 22)
#define DUMMY_CSDEASSERT (1 << 24)
/*
* Register: SPI_FAST_SEQ_FLASH_STA_DATA
*/
#define STA_DATA_BYTE1(x) ((x & 0xff) << 0)
#define STA_DATA_BYTE2(x) ((x & 0xff) << 8)
#define STA_PADS_1 (0x0 << 16)
#define STA_PADS_2 (0x1 << 16)
#define STA_PADS_4 (0x3 << 16)
#define STA_CSDEASSERT (0x1 << 20)
#define STA_RDNOTWR (0x1 << 21)
/*
* FSM SPI Instruction Opcodes
*/
#define STFSM_OPC_CMD 0x1
#define STFSM_OPC_ADD 0x2
#define STFSM_OPC_STA 0x3
#define STFSM_OPC_MODE 0x4
#define STFSM_OPC_DUMMY 0x5
#define STFSM_OPC_DATA 0x6
#define STFSM_OPC_WAIT 0x7
#define STFSM_OPC_JUMP 0x8
#define STFSM_OPC_GOTO 0x9
#define STFSM_OPC_STOP 0xF
/*
* FSM SPI Instructions (== opcode + operand).
*/
#define STFSM_INSTR(cmd, op) ((cmd) | ((op) << 4))
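/*
 * For illustration: an instruction byte packs the opcode in bits [3:0] and the
 * operand above it, so STFSM_INST_CMD1 below evaluates to 0x11
 * (opcode STFSM_OPC_CMD = 0x1, operand 1).
 */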
#define STFSM_INST_CMD1 STFSM_INSTR(STFSM_OPC_CMD, 1)
#define STFSM_INST_CMD2 STFSM_INSTR(STFSM_OPC_CMD, 2)
#define STFSM_INST_CMD3 STFSM_INSTR(STFSM_OPC_CMD, 3)
#define STFSM_INST_CMD4 STFSM_INSTR(STFSM_OPC_CMD, 4)
#define STFSM_INST_CMD5 STFSM_INSTR(STFSM_OPC_CMD, 5)
#define STFSM_INST_ADD1 STFSM_INSTR(STFSM_OPC_ADD, 1)
#define STFSM_INST_ADD2 STFSM_INSTR(STFSM_OPC_ADD, 2)
#define STFSM_INST_DATA_WRITE STFSM_INSTR(STFSM_OPC_DATA, 1)
#define STFSM_INST_DATA_READ STFSM_INSTR(STFSM_OPC_DATA, 2)
#define STFSM_INST_STA_RD1 STFSM_INSTR(STFSM_OPC_STA, 0x1)
#define STFSM_INST_STA_WR1 STFSM_INSTR(STFSM_OPC_STA, 0x1)
#define STFSM_INST_STA_RD2 STFSM_INSTR(STFSM_OPC_STA, 0x2)
#define STFSM_INST_STA_WR1_2 STFSM_INSTR(STFSM_OPC_STA, 0x3)
#define STFSM_INST_MODE STFSM_INSTR(STFSM_OPC_MODE, 0)
#define STFSM_INST_DUMMY STFSM_INSTR(STFSM_OPC_DUMMY, 0)
#define STFSM_INST_WAIT STFSM_INSTR(STFSM_OPC_WAIT, 0)
#define STFSM_INST_STOP STFSM_INSTR(STFSM_OPC_STOP, 0)
#define STFSM_DEFAULT_EMI_FREQ 100000000UL /* 100 MHz */
#define STFSM_DEFAULT_WR_TIME	(STFSM_DEFAULT_EMI_FREQ / 1000 * 15)	/* 15ms */
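/*
 * Worked example (assuming the default 100 MHz EMI clock): 15 ms corresponds
 * to 100,000,000 / 1000 * 15 = 1,500,000 EMI clock cycles. Note that the
 * original expression used (15/1000), which truncates to 0 in integer
 * arithmetic; the division is done first above to avoid that.
 */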
#define STFSM_FLASH_SAFE_FREQ 10000000UL /* 10 MHz */
#define STFSM_MAX_WAIT_SEQ_MS 1000 /* FSM execution time */
/* S25FLxxxS commands */
#define S25FL_CMD_WRITE4_1_1_4 0x34
#define S25FL_CMD_SE4 0xdc
#define S25FL_CMD_CLSR 0x30
#define S25FL_CMD_DYBWR 0xe1
#define S25FL_CMD_DYBRD 0xe0
#define S25FL_CMD_WRITE4 0x12 /* Note, opcode clashes with
* 'SPINOR_OP_WRITE_1_4_4'
* as found on N25Qxxx devices! */
/* Status register */
#define FLASH_STATUS_BUSY 0x01
#define FLASH_STATUS_WEL 0x02
#define FLASH_STATUS_BP0 0x04
#define FLASH_STATUS_BP1 0x08
#define FLASH_STATUS_BP2 0x10
#define FLASH_STATUS_SRWP0 0x80
#define FLASH_STATUS_TIMEOUT 0xff
/* S25FL Error Flags */
#define S25FL_STATUS_E_ERR 0x20
#define S25FL_STATUS_P_ERR 0x40
#define N25Q_CMD_WRVCR 0x81
#define N25Q_CMD_RDVCR 0x85
#define N25Q_CMD_RDVECR 0x65
#define N25Q_CMD_RDNVCR 0xb5
#define N25Q_CMD_WRNVCR 0xb1
#define FLASH_PAGESIZE 256 /* In Bytes */
#define FLASH_PAGESIZE_32 (FLASH_PAGESIZE / 4) /* In uint32_t */
#define FLASH_MAX_BUSY_WAIT (300 * HZ) /* Maximum 'CHIPERASE' time */
/*
* Flags to tweak operation of default read/write/erase routines
*/
#define CFG_READ_TOGGLE_32BIT_ADDR 0x00000001
#define CFG_WRITE_TOGGLE_32BIT_ADDR 0x00000002
#define CFG_ERASESEC_TOGGLE_32BIT_ADDR 0x00000008
#define CFG_S25FL_CHECK_ERROR_FLAGS 0x00000010
struct stfsm_seq {
uint32_t data_size;
uint32_t addr1;
uint32_t addr2;
uint32_t addr_cfg;
uint32_t seq_opc[5];
uint32_t mode;
uint32_t dummy;
uint32_t status;
uint8_t seq[16];
uint32_t seq_cfg;
} __packed __aligned(4);
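/*
 * Note: the field order of struct stfsm_seq mirrors the FSM register block
 * starting at SPI_FAST_SEQ_TRANSFER_SIZE (0x100); stfsm_load_seq() relies on
 * this by copying the structure into the registers one 32-bit word at a time.
 */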
struct stfsm {
struct device *dev;
void __iomem *base;
struct mtd_info mtd;
struct mutex lock;
struct flash_info *info;
struct clk *clk;
uint32_t configuration;
uint32_t fifo_dir_delay;
bool booted_from_spi;
bool reset_signal;
bool reset_por;
struct stfsm_seq stfsm_seq_read;
struct stfsm_seq stfsm_seq_write;
struct stfsm_seq stfsm_seq_en_32bit_addr;
};
/* Parameters to configure a READ or WRITE FSM sequence */
struct seq_rw_config {
uint32_t flags; /* flags to support config */
uint8_t cmd; /* FLASH command */
int write; /* Write Sequence */
uint8_t addr_pads; /* No. of addr pads (MODE & DUMMY) */
uint8_t data_pads; /* No. of data pads */
uint8_t mode_data; /* MODE data */
uint8_t mode_cycles; /* No. of MODE cycles */
uint8_t dummy_cycles; /* No. of DUMMY cycles */
};
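/*
 * For illustration, a config entry such as
 *   {FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4, 0, 4, 4, 0x00, 2, 4}
 * describes a READ sequence using 4 address pads and 4 data pads, with mode
 * data 0x00 sent over 2 mode cycles followed by 4 dummy cycles.
 */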
/* SPI Flash Device Table */
struct flash_info {
char *name;
/*
* JEDEC id zero means "no ID" (most older chips); otherwise it has
* a high byte of zero plus three data bytes: the manufacturer id,
* then a two byte device id.
*/
u32 jedec_id;
u16 ext_id;
/*
* The size listed here is what works with SPINOR_OP_SE, which isn't
* necessarily called a "sector" by the vendor.
*/
unsigned sector_size;
u16 n_sectors;
u32 flags;
/*
* Note, where FAST_READ is supported, freq_max specifies the
* FAST_READ frequency, not the READ frequency.
*/
u32 max_freq;
int (*config)(struct stfsm *);
};
static int stfsm_n25q_config(struct stfsm *fsm);
static int stfsm_mx25_config(struct stfsm *fsm);
static int stfsm_s25fl_config(struct stfsm *fsm);
static int stfsm_w25q_config(struct stfsm *fsm);
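/*
 * Entries below are positional initialisers for struct flash_info:
 * { name, jedec_id, ext_id, sector_size, n_sectors, flags, max_freq, config }
 */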
static struct flash_info flash_types[] = {
/*
* ST Microelectronics/Numonyx --
* (newer production versions may have feature updates,
* e.g. a faster operating frequency)
*/
#define M25P_FLAG (FLASH_FLAG_READ_WRITE | FLASH_FLAG_READ_FAST)
{ "m25p40", 0x202013, 0, 64 * 1024, 8, M25P_FLAG, 25, NULL },
{ "m25p80", 0x202014, 0, 64 * 1024, 16, M25P_FLAG, 25, NULL },
{ "m25p16", 0x202015, 0, 64 * 1024, 32, M25P_FLAG, 25, NULL },
{ "m25p32", 0x202016, 0, 64 * 1024, 64, M25P_FLAG, 50, NULL },
{ "m25p64", 0x202017, 0, 64 * 1024, 128, M25P_FLAG, 50, NULL },
{ "m25p128", 0x202018, 0, 256 * 1024, 64, M25P_FLAG, 50, NULL },
#define M25PX_FLAG (FLASH_FLAG_READ_WRITE | \
FLASH_FLAG_READ_FAST | \
FLASH_FLAG_READ_1_1_2 | \
FLASH_FLAG_WRITE_1_1_2)
{ "m25px32", 0x207116, 0, 64 * 1024, 64, M25PX_FLAG, 75, NULL },
{ "m25px64", 0x207117, 0, 64 * 1024, 128, M25PX_FLAG, 75, NULL },
/* Macronix MX25xxx
* - Support for 'FLASH_FLAG_WRITE_1_4_4' is omitted for devices
* where operating frequency must be reduced.
*/
#define MX25_FLAG (FLASH_FLAG_READ_WRITE | \
FLASH_FLAG_READ_FAST | \
FLASH_FLAG_READ_1_1_2 | \
FLASH_FLAG_READ_1_2_2 | \
FLASH_FLAG_READ_1_1_4 | \
FLASH_FLAG_SE_4K | \
FLASH_FLAG_SE_32K)
{ "mx25l3255e", 0xc29e16, 0, 64 * 1024, 64,
(MX25_FLAG | FLASH_FLAG_WRITE_1_4_4), 86,
stfsm_mx25_config},
{ "mx25l25635e", 0xc22019, 0, 64*1024, 512,
(MX25_FLAG | FLASH_FLAG_32BIT_ADDR | FLASH_FLAG_RESET), 70,
stfsm_mx25_config },
{ "mx25l25655e", 0xc22619, 0, 64*1024, 512,
(MX25_FLAG | FLASH_FLAG_32BIT_ADDR | FLASH_FLAG_RESET), 70,
stfsm_mx25_config},
#define N25Q_FLAG (FLASH_FLAG_READ_WRITE | \
FLASH_FLAG_READ_FAST | \
FLASH_FLAG_READ_1_1_2 | \
FLASH_FLAG_READ_1_2_2 | \
FLASH_FLAG_READ_1_1_4 | \
FLASH_FLAG_READ_1_4_4 | \
FLASH_FLAG_WRITE_1_1_2 | \
FLASH_FLAG_WRITE_1_2_2 | \
FLASH_FLAG_WRITE_1_1_4 | \
FLASH_FLAG_WRITE_1_4_4)
{ "n25q128", 0x20ba18, 0, 64 * 1024, 256, N25Q_FLAG, 108,
stfsm_n25q_config },
{ "n25q256", 0x20ba19, 0, 64 * 1024, 512,
N25Q_FLAG | FLASH_FLAG_32BIT_ADDR, 108, stfsm_n25q_config },
/*
* Spansion S25FLxxxP
* - 256KiB and 64KiB sector variants (identified by ext. JEDEC)
*/
#define S25FLXXXP_FLAG (FLASH_FLAG_READ_WRITE | \
FLASH_FLAG_READ_1_1_2 | \
FLASH_FLAG_READ_1_2_2 | \
FLASH_FLAG_READ_1_1_4 | \
FLASH_FLAG_READ_1_4_4 | \
FLASH_FLAG_WRITE_1_1_4 | \
FLASH_FLAG_READ_FAST)
{ "s25fl032p", 0x010215, 0x4d00, 64 * 1024, 64, S25FLXXXP_FLAG, 80,
stfsm_s25fl_config},
{ "s25fl129p0", 0x012018, 0x4d00, 256 * 1024, 64, S25FLXXXP_FLAG, 80,
stfsm_s25fl_config },
{ "s25fl129p1", 0x012018, 0x4d01, 64 * 1024, 256, S25FLXXXP_FLAG, 80,
stfsm_s25fl_config },
/*
* Spansion S25FLxxxS
* - 256KiB and 64KiB sector variants (identified by ext. JEDEC)
* - RESET# signal supported by die but not brought out on all
* package types. The package type is a function of board design,
* so this information is captured in the board's flags.
* - Supports 'DYB' sector protection. Depending on variant, sectors
* may default to locked state on power-on.
*/
#define S25FLXXXS_FLAG (S25FLXXXP_FLAG | \
FLASH_FLAG_RESET | \
FLASH_FLAG_DYB_LOCKING)
{ "s25fl128s0", 0x012018, 0x0300, 256 * 1024, 64, S25FLXXXS_FLAG, 80,
stfsm_s25fl_config },
{ "s25fl128s1", 0x012018, 0x0301, 64 * 1024, 256, S25FLXXXS_FLAG, 80,
stfsm_s25fl_config },
{ "s25fl256s0", 0x010219, 0x4d00, 256 * 1024, 128,
S25FLXXXS_FLAG | FLASH_FLAG_32BIT_ADDR, 80, stfsm_s25fl_config },
{ "s25fl256s1", 0x010219, 0x4d01, 64 * 1024, 512,
S25FLXXXS_FLAG | FLASH_FLAG_32BIT_ADDR, 80, stfsm_s25fl_config },
/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
#define W25X_FLAG (FLASH_FLAG_READ_WRITE | \
FLASH_FLAG_READ_FAST | \
FLASH_FLAG_READ_1_1_2 | \
FLASH_FLAG_WRITE_1_1_2)
{ "w25x40", 0xef3013, 0, 64 * 1024, 8, W25X_FLAG, 75, NULL },
{ "w25x80", 0xef3014, 0, 64 * 1024, 16, W25X_FLAG, 75, NULL },
{ "w25x16", 0xef3015, 0, 64 * 1024, 32, W25X_FLAG, 75, NULL },
{ "w25x32", 0xef3016, 0, 64 * 1024, 64, W25X_FLAG, 75, NULL },
{ "w25x64", 0xef3017, 0, 64 * 1024, 128, W25X_FLAG, 75, NULL },
/* Winbond -- w25q "blocks" are 64K, "sectors" are 4KiB */
#define W25Q_FLAG (FLASH_FLAG_READ_WRITE | \
FLASH_FLAG_READ_FAST | \
FLASH_FLAG_READ_1_1_2 | \
FLASH_FLAG_READ_1_2_2 | \
FLASH_FLAG_READ_1_1_4 | \
FLASH_FLAG_READ_1_4_4 | \
FLASH_FLAG_WRITE_1_1_4)
{ "w25q80", 0xef4014, 0, 64 * 1024, 16, W25Q_FLAG, 80,
stfsm_w25q_config },
{ "w25q16", 0xef4015, 0, 64 * 1024, 32, W25Q_FLAG, 80,
stfsm_w25q_config },
{ "w25q32", 0xef4016, 0, 64 * 1024, 64, W25Q_FLAG, 80,
stfsm_w25q_config },
{ "w25q64", 0xef4017, 0, 64 * 1024, 128, W25Q_FLAG, 80,
stfsm_w25q_config },
/* Sentinel */
{ NULL, 0x000000, 0, 0, 0, 0, 0, NULL },
};
/*
* FSM message sequence configurations:
*
* All configs are presented in order of preference
*/
/* Default READ configurations, in order of preference */
static struct seq_rw_config default_read_configs[] = {
{FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4, 0, 4, 4, 0x00, 2, 4},
{FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4, 0, 1, 4, 0x00, 4, 0},
{FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2, 0, 2, 2, 0x00, 4, 0},
{FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2, 0, 1, 2, 0x00, 0, 8},
{FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST, 0, 1, 1, 0x00, 0, 8},
{FLASH_FLAG_READ_WRITE, SPINOR_OP_READ, 0, 1, 1, 0x00, 0, 0},
{0x00, 0, 0, 0, 0, 0x00, 0, 0},
};
/* Default WRITE configurations */
static struct seq_rw_config default_write_configs[] = {
{FLASH_FLAG_WRITE_1_4_4, SPINOR_OP_WRITE_1_4_4, 1, 4, 4, 0x00, 0, 0},
{FLASH_FLAG_WRITE_1_1_4, SPINOR_OP_WRITE_1_1_4, 1, 1, 4, 0x00, 0, 0},
{FLASH_FLAG_WRITE_1_2_2, SPINOR_OP_WRITE_1_2_2, 1, 2, 2, 0x00, 0, 0},
{FLASH_FLAG_WRITE_1_1_2, SPINOR_OP_WRITE_1_1_2, 1, 1, 2, 0x00, 0, 0},
{FLASH_FLAG_READ_WRITE, SPINOR_OP_WRITE, 1, 1, 1, 0x00, 0, 0},
{0x00, 0, 0, 0, 0, 0x00, 0, 0},
};
/*
* [N25Qxxx] Configuration
*/
#define N25Q_VCR_DUMMY_CYCLES(x) (((x) & 0xf) << 4)
#define N25Q_VCR_XIP_DISABLED ((uint8_t)0x1 << 3)
#define N25Q_VCR_WRAP_CONT 0x3
/* N25Q 3-byte Address READ configurations
* - 'FAST' variants configured for 8 dummy cycles.
*
* Note, the number of dummy cycles used for 'FAST' READ operations is
* configurable and would normally be tuned according to the READ command and
* operating frequency. However, this applies universally to all 'FAST' READ
* commands, including those used by the SPIBoot controller, and remains in
* force until the device is power-cycled. Since the SPIBoot controller is
* hard-wired to use 8 dummy cycles, we must configure the device to also use 8
* cycles.
*/
static struct seq_rw_config n25q_read3_configs[] = {
{FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4, 0, 4, 4, 0x00, 0, 8},
{FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4, 0, 1, 4, 0x00, 0, 8},
{FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2, 0, 2, 2, 0x00, 0, 8},
{FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2, 0, 1, 2, 0x00, 0, 8},
{FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST, 0, 1, 1, 0x00, 0, 8},
{FLASH_FLAG_READ_WRITE, SPINOR_OP_READ, 0, 1, 1, 0x00, 0, 0},
{0x00, 0, 0, 0, 0, 0x00, 0, 0},
};
/* N25Q 4-byte Address READ configurations
* - use special 4-byte address READ commands (reduces overheads, and
* reduces risk of hitting watchdog reset issues).
* - 'FAST' variants configured for 8 dummy cycles (see note above.)
*/
static struct seq_rw_config n25q_read4_configs[] = {
{FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B, 0, 4, 4, 0x00, 0, 8},
{FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B, 0, 1, 4, 0x00, 0, 8},
{FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B, 0, 2, 2, 0x00, 0, 8},
{FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B, 0, 1, 2, 0x00, 0, 8},
{FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST_4B, 0, 1, 1, 0x00, 0, 8},
{FLASH_FLAG_READ_WRITE, SPINOR_OP_READ_4B, 0, 1, 1, 0x00, 0, 0},
{0x00, 0, 0, 0, 0, 0x00, 0, 0},
};
/*
* [MX25xxx] Configuration
*/
#define MX25_STATUS_QE (0x1 << 6)
static int stfsm_mx25_en_32bit_addr_seq(struct stfsm_seq *seq)
{
seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(SPINOR_OP_EN4B) |
SEQ_OPC_CSDEASSERT);
seq->seq[0] = STFSM_INST_CMD1;
seq->seq[1] = STFSM_INST_WAIT;
seq->seq[2] = STFSM_INST_STOP;
seq->seq_cfg = (SEQ_CFG_PADS_1 |
SEQ_CFG_ERASE |
SEQ_CFG_READNOTWRITE |
SEQ_CFG_CSDEASSERT |
SEQ_CFG_STARTSEQ);
return 0;
}
/*
* [S25FLxxx] Configuration
*/
#define STFSM_S25FL_CONFIG_QE (0x1 << 1)
/*
* S25FLxxxS devices provide three ways of supporting 32-bit addressing: Bank
* Register, Extended Address Modes, and a 32-bit address command set. The
* 32-bit address command set is used here, since it avoids any problems with
* entering a state that is incompatible with the SPIBoot Controller.
*/
static struct seq_rw_config stfsm_s25fl_read4_configs[] = {
{FLASH_FLAG_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B, 0, 4, 4, 0x00, 2, 4},
{FLASH_FLAG_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B, 0, 1, 4, 0x00, 0, 8},
{FLASH_FLAG_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B, 0, 2, 2, 0x00, 4, 0},
{FLASH_FLAG_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B, 0, 1, 2, 0x00, 0, 8},
{FLASH_FLAG_READ_FAST, SPINOR_OP_READ_FAST_4B, 0, 1, 1, 0x00, 0, 8},
{FLASH_FLAG_READ_WRITE, SPINOR_OP_READ_4B, 0, 1, 1, 0x00, 0, 0},
{0x00, 0, 0, 0, 0, 0x00, 0, 0},
};
static struct seq_rw_config stfsm_s25fl_write4_configs[] = {
{FLASH_FLAG_WRITE_1_1_4, S25FL_CMD_WRITE4_1_1_4, 1, 1, 4, 0x00, 0, 0},
{FLASH_FLAG_READ_WRITE, S25FL_CMD_WRITE4, 1, 1, 1, 0x00, 0, 0},
{0x00, 0, 0, 0, 0, 0x00, 0, 0},
};
/*
* [W25Qxxx] Configuration
*/
#define W25Q_STATUS_QE (0x1 << 1)
static struct stfsm_seq stfsm_seq_read_jedec = {
.data_size = TRANSFER_SIZE(8),
.seq_opc[0] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(SPINOR_OP_RDID)),
.seq = {
STFSM_INST_CMD1,
STFSM_INST_DATA_READ,
STFSM_INST_STOP,
},
.seq_cfg = (SEQ_CFG_PADS_1 |
SEQ_CFG_READNOTWRITE |
SEQ_CFG_CSDEASSERT |
SEQ_CFG_STARTSEQ),
};
static struct stfsm_seq stfsm_seq_read_status_fifo = {
.data_size = TRANSFER_SIZE(4),
.seq_opc[0] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(SPINOR_OP_RDSR)),
.seq = {
STFSM_INST_CMD1,
STFSM_INST_DATA_READ,
STFSM_INST_STOP,
},
.seq_cfg = (SEQ_CFG_PADS_1 |
SEQ_CFG_READNOTWRITE |
SEQ_CFG_CSDEASSERT |
SEQ_CFG_STARTSEQ),
};
static struct stfsm_seq stfsm_seq_erase_sector = {
/* 'addr_cfg' configured during initialisation */
.seq_opc = {
(SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
(SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(SPINOR_OP_SE)),
},
.seq = {
STFSM_INST_CMD1,
STFSM_INST_CMD2,
STFSM_INST_ADD1,
STFSM_INST_ADD2,
STFSM_INST_STOP,
},
.seq_cfg = (SEQ_CFG_PADS_1 |
SEQ_CFG_READNOTWRITE |
SEQ_CFG_CSDEASSERT |
SEQ_CFG_STARTSEQ),
};
static struct stfsm_seq stfsm_seq_erase_chip = {
.seq_opc = {
(SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
(SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(SPINOR_OP_CHIP_ERASE) | SEQ_OPC_CSDEASSERT),
},
.seq = {
STFSM_INST_CMD1,
STFSM_INST_CMD2,
STFSM_INST_WAIT,
STFSM_INST_STOP,
},
.seq_cfg = (SEQ_CFG_PADS_1 |
SEQ_CFG_ERASE |
SEQ_CFG_READNOTWRITE |
SEQ_CFG_CSDEASSERT |
SEQ_CFG_STARTSEQ),
};
static struct stfsm_seq stfsm_seq_write_status = {
.seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(SPINOR_OP_WREN) | SEQ_OPC_CSDEASSERT),
.seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(SPINOR_OP_WRSR)),
.seq = {
STFSM_INST_CMD1,
STFSM_INST_CMD2,
STFSM_INST_STA_WR1,
STFSM_INST_STOP,
},
.seq_cfg = (SEQ_CFG_PADS_1 |
SEQ_CFG_READNOTWRITE |
SEQ_CFG_CSDEASSERT |
SEQ_CFG_STARTSEQ),
};
/* Dummy sequence to read one byte of data from flash into the FIFO */
static const struct stfsm_seq stfsm_seq_load_fifo_byte = {
.data_size = TRANSFER_SIZE(1),
.seq_opc[0] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(SPINOR_OP_RDID)),
.seq = {
STFSM_INST_CMD1,
STFSM_INST_DATA_READ,
STFSM_INST_STOP,
},
.seq_cfg = (SEQ_CFG_PADS_1 |
SEQ_CFG_READNOTWRITE |
SEQ_CFG_CSDEASSERT |
SEQ_CFG_STARTSEQ),
};
static int stfsm_n25q_en_32bit_addr_seq(struct stfsm_seq *seq)
{
seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(SPINOR_OP_EN4B));
seq->seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
SEQ_OPC_CSDEASSERT);
seq->seq[0] = STFSM_INST_CMD2;
seq->seq[1] = STFSM_INST_CMD1;
seq->seq[2] = STFSM_INST_WAIT;
seq->seq[3] = STFSM_INST_STOP;
seq->seq_cfg = (SEQ_CFG_PADS_1 |
SEQ_CFG_ERASE |
SEQ_CFG_READNOTWRITE |
SEQ_CFG_CSDEASSERT |
SEQ_CFG_STARTSEQ);
return 0;
}
static inline int stfsm_is_idle(struct stfsm *fsm)
{
return readl(fsm->base + SPI_FAST_SEQ_STA) & 0x10;
}
static inline uint32_t stfsm_fifo_available(struct stfsm *fsm)
{
return (readl(fsm->base + SPI_FAST_SEQ_STA) >> 5) & 0x7f;
}
static inline void stfsm_load_seq(struct stfsm *fsm,
const struct stfsm_seq *seq)
{
void __iomem *dst = fsm->base + SPI_FAST_SEQ_TRANSFER_SIZE;
const uint32_t *src = (const uint32_t *)seq;
int words = sizeof(*seq) / sizeof(*src);
BUG_ON(!stfsm_is_idle(fsm));
while (words--) {
writel(*src, dst);
src++;
dst += 4;
}
}
static void stfsm_wait_seq(struct stfsm *fsm)
{
unsigned long deadline;
int timeout = 0;
deadline = jiffies + msecs_to_jiffies(STFSM_MAX_WAIT_SEQ_MS);
while (!timeout) {
if (time_after_eq(jiffies, deadline))
timeout = 1;
if (stfsm_is_idle(fsm))
return;
cond_resched();
}
dev_err(fsm->dev, "timeout on sequence completion\n");
}
static void stfsm_read_fifo(struct stfsm *fsm, uint32_t *buf, uint32_t size)
{
uint32_t remaining = size >> 2;
uint32_t avail;
uint32_t words;
dev_dbg(fsm->dev, "Reading %d bytes from FIFO\n", size);
BUG_ON((((uintptr_t)buf) & 0x3) || (size & 0x3));
while (remaining) {
for (;;) {
avail = stfsm_fifo_available(fsm);
if (avail)
break;
udelay(1);
}
words = min(avail, remaining);
remaining -= words;
readsl(fsm->base + SPI_FAST_SEQ_DATA_REG, buf, words);
buf += words;
}
}
/*
* Clear the data FIFO
*
* Typically, this is only required during driver initialisation, where no
* assumptions can be made regarding the state of the FIFO.
*
* The process of clearing the FIFO is complicated by the fact that while it is
* possible for the FIFO to contain an arbitrary number of bytes [1], the
* SPI_FAST_SEQ_STA register only reports the number of complete 32-bit words
* present. Furthermore, data can only be drained from the FIFO by reading
* complete 32-bit words.
*
* With this in mind, a two-stage process is used to clear the FIFO:
*
* 1. Read any complete 32-bit words from the FIFO, as reported by the
* SPI_FAST_SEQ_STA register.
*
* 2. Mop up any remaining bytes. At this point, it is not known if there
* are 0, 1, 2, or 3 bytes in the FIFO. To handle all cases, a dummy FSM
* sequence is used to load one byte at a time, until a complete 32-bit
* word is formed; at most, 4 bytes will need to be loaded.
*
* [1] It is theoretically possible for the FIFO to contain an arbitrary number
* of bits. However, since there are no known use-cases that leave
* incomplete bytes in the FIFO, only words and bytes are considered here.
*/
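/*
 * Hypothetical example: with 6 bytes left in the FIFO, stage 1 drains one
 * complete 32-bit word (4 bytes) and stage 2 then loads single bytes until
 * the remaining 2 bytes grow into a full word, which is read and discarded.
 */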
static void stfsm_clear_fifo(struct stfsm *fsm)
{
const struct stfsm_seq *seq = &stfsm_seq_load_fifo_byte;
uint32_t words, i;
/* 1. Clear any 32-bit words */
words = stfsm_fifo_available(fsm);
if (words) {
for (i = 0; i < words; i++)
readl(fsm->base + SPI_FAST_SEQ_DATA_REG);
dev_dbg(fsm->dev, "cleared %d words from FIFO\n", words);
}
/*
* 2. Clear any remaining bytes
* - Load the FIFO, one byte at a time, until a complete 32-bit word
* is available.
*/
for (i = 0, words = 0; i < 4 && !words; i++) {
stfsm_load_seq(fsm, seq);
stfsm_wait_seq(fsm);
words = stfsm_fifo_available(fsm);
}
/* - A single word must be available now */
if (words != 1) {
dev_err(fsm->dev, "failed to clear bytes from the data FIFO\n");
return;
}
/* - Read the 32-bit word */
readl(fsm->base + SPI_FAST_SEQ_DATA_REG);
dev_dbg(fsm->dev, "cleared %d byte(s) from the data FIFO\n", 4 - i);
}
static int stfsm_write_fifo(struct stfsm *fsm, const uint32_t *buf,
uint32_t size)
{
uint32_t words = size >> 2;
dev_dbg(fsm->dev, "writing %d bytes to FIFO\n", size);
BUG_ON((((uintptr_t)buf) & 0x3) || (size & 0x3));
writesl(fsm->base + SPI_FAST_SEQ_DATA_REG, buf, words);
return size;
}
static int stfsm_enter_32bit_addr(struct stfsm *fsm, int enter)
{
struct stfsm_seq *seq = &fsm->stfsm_seq_en_32bit_addr;
uint32_t cmd = enter ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(cmd) |
SEQ_OPC_CSDEASSERT);
stfsm_load_seq(fsm, seq);
stfsm_wait_seq(fsm);
return 0;
}
static uint8_t stfsm_wait_busy(struct stfsm *fsm)
{
struct stfsm_seq *seq = &stfsm_seq_read_status_fifo;
unsigned long deadline;
uint32_t status;
int timeout = 0;
/* Use RDRS1 */
seq->seq_opc[0] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(SPINOR_OP_RDSR));
/* Load read_status sequence */
stfsm_load_seq(fsm, seq);
/*
* Repeat until busy bit is deasserted, or timeout, or error (S25FLxxxS)
*/
deadline = jiffies + FLASH_MAX_BUSY_WAIT;
while (!timeout) {
if (time_after_eq(jiffies, deadline))
timeout = 1;
stfsm_wait_seq(fsm);
stfsm_read_fifo(fsm, &status, 4);
if ((status & FLASH_STATUS_BUSY) == 0)
return 0;
if ((fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS) &&
((status & S25FL_STATUS_P_ERR) ||
(status & S25FL_STATUS_E_ERR)))
return (uint8_t)(status & 0xff);
if (!timeout)
/* Restart */
writel(seq->seq_cfg, fsm->base + SPI_FAST_SEQ_CFG);
cond_resched();
}
dev_err(fsm->dev, "timeout on wait_busy\n");
return FLASH_STATUS_TIMEOUT;
}
static int stfsm_read_status(struct stfsm *fsm, uint8_t cmd,
uint8_t *data, int bytes)
{
struct stfsm_seq *seq = &stfsm_seq_read_status_fifo;
uint32_t tmp;
uint8_t *t = (uint8_t *)&tmp;
int i;
dev_dbg(fsm->dev, "read 'status' register [0x%02x], %d byte(s)\n",
cmd, bytes);
BUG_ON(bytes != 1 && bytes != 2);
seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(cmd));
stfsm_load_seq(fsm, seq);
stfsm_read_fifo(fsm, &tmp, 4);
for (i = 0; i < bytes; i++)
data[i] = t[i];
stfsm_wait_seq(fsm);
return 0;
}
static int stfsm_write_status(struct stfsm *fsm, uint8_t cmd,
uint16_t data, int bytes, int wait_busy)
{
struct stfsm_seq *seq = &stfsm_seq_write_status;
dev_dbg(fsm->dev,
"write 'status' register [0x%02x], %d byte(s), 0x%04x\n"
" %s wait-busy\n", cmd, bytes, data, wait_busy ? "with" : "no");
BUG_ON(bytes != 1 && bytes != 2);
seq->seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(cmd));
seq->status = (uint32_t)data | STA_PADS_1 | STA_CSDEASSERT;
seq->seq[2] = (bytes == 1) ? STFSM_INST_STA_WR1 : STFSM_INST_STA_WR1_2;
stfsm_load_seq(fsm, seq);
stfsm_wait_seq(fsm);
if (wait_busy)
stfsm_wait_busy(fsm);
return 0;
}
/*
* SoC reset on 'boot-from-spi' systems
*
* Certain modes of operation cause the Flash device to enter a particular state
* for a period of time (e.g. 'Erase Sector', 'Quad Enable', and 'Enter 32-bit
* Addr' commands). On boot-from-spi systems, it is important to consider what
* happens if a warm reset occurs during this period. The SPIBoot controller
* assumes that the Flash device is in its default reset state, 24-bit address mode,
* and ready to accept commands. This can be achieved using some form of
* on-board logic/controller to force a device POR in response to a SoC-level
* reset or by making use of the device reset signal if available (limited
* number of devices only).
*
* Failure to take such precautions can cause problems following a warm reset.
* For some operations (e.g. ERASE), there is little that can be done. For
* other modes of operation (e.g. 32-bit addressing), options are often
* available that can help minimise the window in which a reset could cause a
* problem.
*
*/
static bool stfsm_can_handle_soc_reset(struct stfsm *fsm)
{
/* Reset signal is available on the board and supported by the device */
if (fsm->reset_signal && fsm->info->flags & FLASH_FLAG_RESET)
return true;
/* Board-level logic forces a power-on-reset */
if (fsm->reset_por)
return true;
/* Reset is not properly handled and may result in failure to reboot */
return false;
}
/* Configure 'addr_cfg' according to addressing mode */
static void stfsm_prepare_erasesec_seq(struct stfsm *fsm,
struct stfsm_seq *seq)
{
int addr1_cycles = fsm->info->flags & FLASH_FLAG_32BIT_ADDR ? 16 : 8;
seq->addr_cfg = (ADR_CFG_CYCLES_ADD1(addr1_cycles) |
ADR_CFG_PADS_1_ADD1 |
ADR_CFG_CYCLES_ADD2(16) |
ADR_CFG_PADS_1_ADD2 |
ADR_CFG_CSDEASSERT_ADD2);
}
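/*
 * With a single address pad, ADD1 + ADD2 cycles give one address bit per
 * cycle: 16 + 16 = 32 address bits for 32-bit devices, or 8 + 16 = 24 bits
 * otherwise, with the upper half of the offset loaded into ADD1 and the lower
 * half into ADD2.
 */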
/* Search for preferred configuration based on available flags */
static struct seq_rw_config *
stfsm_search_seq_rw_configs(struct stfsm *fsm,
struct seq_rw_config cfgs[])
{
struct seq_rw_config *config;
int flags = fsm->info->flags;
for (config = cfgs; config->cmd != 0; config++)
if ((config->flags & flags) == config->flags)
return config;
return NULL;
}
/* Prepare a READ/WRITE sequence according to configuration parameters */
static void stfsm_prepare_rw_seq(struct stfsm *fsm,
struct stfsm_seq *seq,
struct seq_rw_config *cfg)
{
int addr1_cycles, addr2_cycles;
int i = 0;
memset(seq, 0, sizeof(*seq));
/* Add READ/WRITE OPC */
seq->seq_opc[i++] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(cfg->cmd));
/* Add WREN OPC for a WRITE sequence */
if (cfg->write)
seq->seq_opc[i++] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
SEQ_OPC_CSDEASSERT);
/* Address configuration (24 or 32-bit addresses) */
addr1_cycles = (fsm->info->flags & FLASH_FLAG_32BIT_ADDR) ? 16 : 8;
addr1_cycles /= cfg->addr_pads;
addr2_cycles = 16 / cfg->addr_pads;
seq->addr_cfg = ((addr1_cycles & 0x3f) << 0 | /* ADD1 cycles */
(cfg->addr_pads - 1) << 6 | /* ADD1 pads */
(addr2_cycles & 0x3f) << 16 | /* ADD2 cycles */
((cfg->addr_pads - 1) << 22)); /* ADD2 pads */
/* Data/Sequence configuration */
seq->seq_cfg = ((cfg->data_pads - 1) << 16 |
SEQ_CFG_STARTSEQ |
SEQ_CFG_CSDEASSERT);
if (!cfg->write)
seq->seq_cfg |= SEQ_CFG_READNOTWRITE;
/* Mode configuration (no. of pads taken from addr cfg) */
seq->mode = ((cfg->mode_data & 0xff) << 0 | /* data */
(cfg->mode_cycles & 0x3f) << 16 | /* cycles */
(cfg->addr_pads - 1) << 22); /* pads */
/* Dummy configuration (no. of pads taken from addr cfg) */
seq->dummy = ((cfg->dummy_cycles & 0x3f) << 16 | /* cycles */
(cfg->addr_pads - 1) << 22); /* pads */
/* Instruction sequence */
i = 0;
if (cfg->write)
seq->seq[i++] = STFSM_INST_CMD2;
seq->seq[i++] = STFSM_INST_CMD1;
seq->seq[i++] = STFSM_INST_ADD1;
seq->seq[i++] = STFSM_INST_ADD2;
if (cfg->mode_cycles)
seq->seq[i++] = STFSM_INST_MODE;
if (cfg->dummy_cycles)
seq->seq[i++] = STFSM_INST_DUMMY;
seq->seq[i++] =
cfg->write ? STFSM_INST_DATA_WRITE : STFSM_INST_DATA_READ;
seq->seq[i++] = STFSM_INST_STOP;
}
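/*
 * Worked example (assumed configuration): a 1-4-4 READ on a 24-bit device has
 * cfg->addr_pads = 4, so addr1_cycles = 8 / 4 = 2 and addr2_cycles = 16 / 4 = 4;
 * those 6 cycles on 4 pads again transfer the full 24 address bits.
 */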
static int stfsm_search_prepare_rw_seq(struct stfsm *fsm,
struct stfsm_seq *seq,
struct seq_rw_config *cfgs)
{
struct seq_rw_config *config;
config = stfsm_search_seq_rw_configs(fsm, cfgs);
if (!config) {
dev_err(fsm->dev, "failed to find suitable config\n");
return -EINVAL;
}
stfsm_prepare_rw_seq(fsm, seq, config);
return 0;
}
/* Prepare the default READ/WRITE/ERASE sequences */
static int stfsm_prepare_rwe_seqs_default(struct stfsm *fsm)
{
uint32_t flags = fsm->info->flags;
int ret;
/* Configure 'READ' sequence */
ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
default_read_configs);
if (ret) {
dev_err(fsm->dev,
"failed to prep READ sequence with flags [0x%08x]\n",
flags);
return ret;
}
/* Configure 'WRITE' sequence */
ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write,
default_write_configs);
if (ret) {
dev_err(fsm->dev,
"failed to prep WRITE sequence with flags [0x%08x]\n",
flags);
return ret;
}
/* Configure 'ERASE_SECTOR' sequence */
stfsm_prepare_erasesec_seq(fsm, &stfsm_seq_erase_sector);
return 0;
}
static int stfsm_mx25_config(struct stfsm *fsm)
{
uint32_t flags = fsm->info->flags;
uint32_t data_pads;
uint8_t sta;
int ret;
bool soc_reset;
/*
* Use default READ/WRITE sequences
*/
ret = stfsm_prepare_rwe_seqs_default(fsm);
if (ret)
return ret;
/*
* Configure 32-bit Address Support
*/
if (flags & FLASH_FLAG_32BIT_ADDR) {
/* Configure 'enter_32bitaddr' FSM sequence */
stfsm_mx25_en_32bit_addr_seq(&fsm->stfsm_seq_en_32bit_addr);
soc_reset = stfsm_can_handle_soc_reset(fsm);
if (soc_reset || !fsm->booted_from_spi)
/* If we can handle SoC resets, we enable 32-bit address
* mode pervasively */
stfsm_enter_32bit_addr(fsm, 1);
else
/* Else, enable/disable 32-bit addressing before/after
* each operation */
fsm->configuration = (CFG_READ_TOGGLE_32BIT_ADDR |
CFG_WRITE_TOGGLE_32BIT_ADDR |
CFG_ERASESEC_TOGGLE_32BIT_ADDR);
}
/* Check status of 'QE' bit, update if required. */
stfsm_read_status(fsm, SPINOR_OP_RDSR, &sta, 1);
data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
if (data_pads == 4) {
if (!(sta & MX25_STATUS_QE)) {
/* Set 'QE' */
sta |= MX25_STATUS_QE;
stfsm_write_status(fsm, SPINOR_OP_WRSR, sta, 1, 1);
}
} else {
if (sta & MX25_STATUS_QE) {
/* Clear 'QE' */
sta &= ~MX25_STATUS_QE;
stfsm_write_status(fsm, SPINOR_OP_WRSR, sta, 1, 1);
}
}
return 0;
}
static int stfsm_n25q_config(struct stfsm *fsm)
{
uint32_t flags = fsm->info->flags;
uint8_t vcr;
int ret = 0;
bool soc_reset;
/* Configure 'READ' sequence */
if (flags & FLASH_FLAG_32BIT_ADDR)
ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
n25q_read4_configs);
else
ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
n25q_read3_configs);
if (ret) {
dev_err(fsm->dev,
"failed to prepare READ sequence with flags [0x%08x]\n",
flags);
return ret;
}
/* Configure 'WRITE' sequence (default configs) */
ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write,
default_write_configs);
if (ret) {
dev_err(fsm->dev,
"preparing WRITE sequence using flags [0x%08x] failed\n",
flags);
return ret;
}
/* Configure 'ERASE_SECTOR' sequence */
stfsm_prepare_erasesec_seq(fsm, &stfsm_seq_erase_sector);
/* Configure 32-bit address support */
if (flags & FLASH_FLAG_32BIT_ADDR) {
stfsm_n25q_en_32bit_addr_seq(&fsm->stfsm_seq_en_32bit_addr);
soc_reset = stfsm_can_handle_soc_reset(fsm);
if (soc_reset || !fsm->booted_from_spi) {
/*
* If we can handle SoC resets, we enable 32-bit
* address mode pervasively
*/
stfsm_enter_32bit_addr(fsm, 1);
} else {
/*
* If not, enable/disable for WRITE and ERASE
* operations (READ uses special commands)
*/
fsm->configuration = (CFG_WRITE_TOGGLE_32BIT_ADDR |
CFG_ERASESEC_TOGGLE_32BIT_ADDR);
}
}
/*
* Configure device to use 8 dummy cycles
*/
vcr = (N25Q_VCR_DUMMY_CYCLES(8) | N25Q_VCR_XIP_DISABLED |
N25Q_VCR_WRAP_CONT);
stfsm_write_status(fsm, N25Q_CMD_WRVCR, vcr, 1, 0);
return 0;
}
static void stfsm_s25fl_prepare_erasesec_seq_32(struct stfsm_seq *seq)
{
seq->seq_opc[1] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(S25FL_CMD_SE4));
seq->addr_cfg = (ADR_CFG_CYCLES_ADD1(16) |
ADR_CFG_PADS_1_ADD1 |
ADR_CFG_CYCLES_ADD2(16) |
ADR_CFG_PADS_1_ADD2 |
ADR_CFG_CSDEASSERT_ADD2);
}
static void stfsm_s25fl_read_dyb(struct stfsm *fsm, uint32_t offs, uint8_t *dby)
{
uint32_t tmp;
struct stfsm_seq seq = {
.data_size = TRANSFER_SIZE(4),
.seq_opc[0] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(S25FL_CMD_DYBRD)),
.addr_cfg = (ADR_CFG_CYCLES_ADD1(16) |
ADR_CFG_PADS_1_ADD1 |
ADR_CFG_CYCLES_ADD2(16) |
ADR_CFG_PADS_1_ADD2),
.addr1 = (offs >> 16) & 0xffff,
.addr2 = offs & 0xffff,
.seq = {
STFSM_INST_CMD1,
STFSM_INST_ADD1,
STFSM_INST_ADD2,
STFSM_INST_DATA_READ,
STFSM_INST_STOP,
},
.seq_cfg = (SEQ_CFG_PADS_1 |
SEQ_CFG_READNOTWRITE |
SEQ_CFG_CSDEASSERT |
SEQ_CFG_STARTSEQ),
};
stfsm_load_seq(fsm, &seq);
stfsm_read_fifo(fsm, &tmp, 4);
*dby = (uint8_t)(tmp >> 24);
stfsm_wait_seq(fsm);
}
static void stfsm_s25fl_write_dyb(struct stfsm *fsm, uint32_t offs, uint8_t dby)
{
struct stfsm_seq seq = {
.seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(SPINOR_OP_WREN) |
SEQ_OPC_CSDEASSERT),
.seq_opc[1] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(S25FL_CMD_DYBWR)),
.addr_cfg = (ADR_CFG_CYCLES_ADD1(16) |
ADR_CFG_PADS_1_ADD1 |
ADR_CFG_CYCLES_ADD2(16) |
ADR_CFG_PADS_1_ADD2),
.status = (uint32_t)dby | STA_PADS_1 | STA_CSDEASSERT,
.addr1 = (offs >> 16) & 0xffff,
.addr2 = offs & 0xffff,
.seq = {
STFSM_INST_CMD1,
STFSM_INST_CMD2,
STFSM_INST_ADD1,
STFSM_INST_ADD2,
STFSM_INST_STA_WR1,
STFSM_INST_STOP,
},
.seq_cfg = (SEQ_CFG_PADS_1 |
SEQ_CFG_READNOTWRITE |
SEQ_CFG_CSDEASSERT |
SEQ_CFG_STARTSEQ),
};
stfsm_load_seq(fsm, &seq);
stfsm_wait_seq(fsm);
stfsm_wait_busy(fsm);
}
static int stfsm_s25fl_clear_status_reg(struct stfsm *fsm)
{
struct stfsm_seq seq = {
.seq_opc[0] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(S25FL_CMD_CLSR) |
SEQ_OPC_CSDEASSERT),
.seq_opc[1] = (SEQ_OPC_PADS_1 |
SEQ_OPC_CYCLES(8) |
SEQ_OPC_OPCODE(SPINOR_OP_WRDI) |
SEQ_OPC_CSDEASSERT),
.seq = {
STFSM_INST_CMD1,
STFSM_INST_CMD2,
STFSM_INST_WAIT,
STFSM_INST_STOP,
},
.seq_cfg = (SEQ_CFG_PADS_1 |
SEQ_CFG_ERASE |
SEQ_CFG_READNOTWRITE |
SEQ_CFG_CSDEASSERT |
SEQ_CFG_STARTSEQ),
};
stfsm_load_seq(fsm, &seq);
stfsm_wait_seq(fsm);
return 0;
}
static int stfsm_s25fl_config(struct stfsm *fsm)
{
struct flash_info *info = fsm->info;
uint32_t flags = info->flags;
uint32_t data_pads;
uint32_t offs;
uint16_t sta_wr;
uint8_t sr1, cr1, dyb;
int update_sr = 0;
int ret;
if (flags & FLASH_FLAG_32BIT_ADDR) {
/*
* Prepare Read/Write/Erase sequences according to S25FLxxx
* 32-bit address command set
*/
ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_read,
stfsm_s25fl_read4_configs);
if (ret)
return ret;
ret = stfsm_search_prepare_rw_seq(fsm, &fsm->stfsm_seq_write,
stfsm_s25fl_write4_configs);
if (ret)
return ret;
stfsm_s25fl_prepare_erasesec_seq_32(&stfsm_seq_erase_sector);
} else {
/* Use default configurations for 24-bit addressing */
ret = stfsm_prepare_rwe_seqs_default(fsm);
if (ret)
return ret;
}
/*
* For devices that support 'DYB' sector locking, check lock status and
* unlock sectors if necessary (some variants power-on with sectors
* locked by default)
*/
if (flags & FLASH_FLAG_DYB_LOCKING) {
for (offs = 0; offs < info->sector_size * info->n_sectors;) {
stfsm_s25fl_read_dyb(fsm, offs, &dyb);
if (dyb == 0x00)
stfsm_s25fl_write_dyb(fsm, offs, 0xff);
/* Handle bottom/top 4KiB parameter sectors */
if ((offs < info->sector_size * 2) ||
(offs >= (info->sector_size - info->n_sectors * 4)))
offs += 0x1000;
else
offs += 0x10000;
}
}
/* Check status of 'QE' bit, update if required. */
stfsm_read_status(fsm, SPINOR_OP_RDCR, &cr1, 1);
data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
if (data_pads == 4) {
if (!(cr1 & STFSM_S25FL_CONFIG_QE)) {
/* Set 'QE' */
cr1 |= STFSM_S25FL_CONFIG_QE;
update_sr = 1;
}
} else {
if (cr1 & STFSM_S25FL_CONFIG_QE) {
/* Clear 'QE' */
cr1 &= ~STFSM_S25FL_CONFIG_QE;
update_sr = 1;
}
}
if (update_sr) {
stfsm_read_status(fsm, SPINOR_OP_RDSR, &sr1, 1);
sta_wr = ((uint16_t)cr1 << 8) | sr1;
stfsm_write_status(fsm, SPINOR_OP_WRSR, sta_wr, 2, 1);
}
/*
* S25FLxxx devices support Program and Erase error flags.
* Configure driver to check flags and clear if necessary.
*/
fsm->configuration |= CFG_S25FL_CHECK_ERROR_FLAGS;
return 0;
}
static int stfsm_w25q_config(struct stfsm *fsm)
{
uint32_t data_pads;
uint8_t sr1, sr2;
uint16_t sr_wr;
int update_sr = 0;
int ret;
ret = stfsm_prepare_rwe_seqs_default(fsm);
if (ret)
return ret;
/* Check status of 'QE' bit, update if required. */
stfsm_read_status(fsm, SPINOR_OP_RDCR, &sr2, 1);
data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1;
if (data_pads == 4) {
if (!(sr2 & W25Q_STATUS_QE)) {
/* Set 'QE' */
sr2 |= W25Q_STATUS_QE;
update_sr = 1;
}
} else {
if (sr2 & W25Q_STATUS_QE) {
/* Clear 'QE' */
sr2 &= ~W25Q_STATUS_QE;
update_sr = 1;
}
}
if (update_sr) {
/* Write status register */
stfsm_read_status(fsm, SPINOR_OP_RDSR, &sr1, 1);
sr_wr = ((uint16_t)sr2 << 8) | sr1;
stfsm_write_status(fsm, SPINOR_OP_WRSR, sr_wr, 2, 1);
}
return 0;
}
static int stfsm_read(struct stfsm *fsm, uint8_t *buf, uint32_t size,
uint32_t offset)
{
struct stfsm_seq *seq = &fsm->stfsm_seq_read;
uint32_t data_pads;
uint32_t read_mask;
uint32_t size_ub;
uint32_t size_lb;
uint32_t size_mop;
uint32_t tmp[4];
uint32_t page_buf[FLASH_PAGESIZE_32];
uint8_t *p;
dev_dbg(fsm->dev, "reading %d bytes from 0x%08x\n", size, offset);
/* Enter 32-bit address mode, if required */
if (fsm->configuration & CFG_READ_TOGGLE_32BIT_ADDR)
stfsm_enter_32bit_addr(fsm, 1);
/* Must read in multiples of 32 cycles (or 32*pads/8 Bytes) */
data_pads = ((seq->seq_cfg >> 16) & 0x3) + 1;
read_mask = (data_pads << 2) - 1;
/* Handle non-aligned buf */
p = ((uintptr_t)buf & 0x3) ? (uint8_t *)page_buf : buf;
/* Handle non-aligned size */
size_ub = (size + read_mask) & ~read_mask;
size_lb = size & ~read_mask;
size_mop = size & read_mask;
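/*
 * For illustration (assumed values): with data_pads = 2 the read_mask is 7,
 * so a 10-byte request gives size_ub = 16, size_lb = 8 and size_mop = 2; the
 * 8 aligned bytes are drained directly and the final 2 bytes are picked out
 * of one extra 8-byte FIFO read.
 */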
seq->data_size = TRANSFER_SIZE(size_ub);
seq->addr1 = (offset >> 16) & 0xffff;
seq->addr2 = offset & 0xffff;
stfsm_load_seq(fsm, seq);
if (size_lb)
stfsm_read_fifo(fsm, (uint32_t *)p, size_lb);
if (size_mop) {
stfsm_read_fifo(fsm, tmp, read_mask + 1);
memcpy(p + size_lb, &tmp, size_mop);
}
/* Handle non-aligned buf */
if ((uintptr_t)buf & 0x3)
memcpy(buf, page_buf, size);
/* Wait for sequence to finish */
stfsm_wait_seq(fsm);
stfsm_clear_fifo(fsm);
/* Exit 32-bit address mode, if required */
if (fsm->configuration & CFG_READ_TOGGLE_32BIT_ADDR)
stfsm_enter_32bit_addr(fsm, 0);
return 0;
}
static int stfsm_write(struct stfsm *fsm, const uint8_t *buf,
uint32_t size, uint32_t offset)
{
struct stfsm_seq *seq = &fsm->stfsm_seq_write;
uint32_t data_pads;
uint32_t write_mask;
uint32_t size_ub;
uint32_t size_lb;
uint32_t size_mop;
uint32_t tmp[4];
uint32_t i;
uint32_t page_buf[FLASH_PAGESIZE_32];
uint8_t *t = (uint8_t *)&tmp;
const uint8_t *p;
int ret;
dev_dbg(fsm->dev, "writing %d bytes to 0x%08x\n", size, offset);
/* Enter 32-bit address mode, if required */
if (fsm->configuration & CFG_WRITE_TOGGLE_32BIT_ADDR)
stfsm_enter_32bit_addr(fsm, 1);
/* Must write in multiples of 32 cycles (or 32*pads/8 bytes) */
data_pads = ((seq->seq_cfg >> 16) & 0x3) + 1;
write_mask = (data_pads << 2) - 1;
/* Handle non-aligned buf */
if ((uintptr_t)buf & 0x3) {
memcpy(page_buf, buf, size);
p = (uint8_t *)page_buf;
} else {
p = buf;
}
/* Handle non-aligned size */
size_ub = (size + write_mask) & ~write_mask;
size_lb = size & ~write_mask;
size_mop = size & write_mask;
seq->data_size = TRANSFER_SIZE(size_ub);
seq->addr1 = (offset >> 16) & 0xffff;
seq->addr2 = offset & 0xffff;
/* Need to set FIFO to write mode, before writing data to FIFO (see
* GNBvb79594)
*/
writel(0x00040000, fsm->base + SPI_FAST_SEQ_CFG);
/*
* Before writing data to the FIFO, apply a small delay to allow a
* potential change of FIFO direction to complete.
*/
if (fsm->fifo_dir_delay == 0)
readl(fsm->base + SPI_FAST_SEQ_CFG);
else
udelay(fsm->fifo_dir_delay);
/* Write data to FIFO, before starting sequence (see GNBvd79593) */
if (size_lb) {
stfsm_write_fifo(fsm, (uint32_t *)p, size_lb);
p += size_lb;
}
/* Handle non-aligned size */
if (size_mop) {
memset(t, 0xff, write_mask + 1); /* fill with 0xff's */
for (i = 0; i < size_mop; i++)
t[i] = *p++;
stfsm_write_fifo(fsm, tmp, write_mask + 1);
}
/* Start sequence */
stfsm_load_seq(fsm, seq);
/* Wait for sequence to finish */
stfsm_wait_seq(fsm);
/* Wait for completion */
ret = stfsm_wait_busy(fsm);
if (ret && fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS)
stfsm_s25fl_clear_status_reg(fsm);
/* Exit 32-bit address mode, if required */
if (fsm->configuration & CFG_WRITE_TOGGLE_32BIT_ADDR)
stfsm_enter_32bit_addr(fsm, 0);
return 0;
}
/*
* Read an address range from the flash chip. The address range
* may be any size provided it is within the physical boundaries.
*/
static int stfsm_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent);
uint32_t bytes;
dev_dbg(fsm->dev, "%s from 0x%08x, len %zd\n",
__func__, (u32)from, len);
mutex_lock(&fsm->lock);
while (len > 0) {
bytes = min_t(size_t, len, FLASH_PAGESIZE);
stfsm_read(fsm, buf, bytes, from);
buf += bytes;
from += bytes;
len -= bytes;
*retlen += bytes;
}
mutex_unlock(&fsm->lock);
return 0;
}
static int stfsm_erase_sector(struct stfsm *fsm, uint32_t offset)
{
struct stfsm_seq *seq = &stfsm_seq_erase_sector;
int ret;
dev_dbg(fsm->dev, "erasing sector at 0x%08x\n", offset);
/* Enter 32-bit address mode, if required */
if (fsm->configuration & CFG_ERASESEC_TOGGLE_32BIT_ADDR)
stfsm_enter_32bit_addr(fsm, 1);
seq->addr1 = (offset >> 16) & 0xffff;
seq->addr2 = offset & 0xffff;
stfsm_load_seq(fsm, seq);
stfsm_wait_seq(fsm);
/* Wait for completion */
ret = stfsm_wait_busy(fsm);
if (ret && fsm->configuration & CFG_S25FL_CHECK_ERROR_FLAGS)
stfsm_s25fl_clear_status_reg(fsm);
/* Exit 32-bit address mode, if required */
if (fsm->configuration & CFG_ERASESEC_TOGGLE_32BIT_ADDR)
stfsm_enter_32bit_addr(fsm, 0);
return ret;
}
static int stfsm_erase_chip(struct stfsm *fsm)
{
const struct stfsm_seq *seq = &stfsm_seq_erase_chip;
dev_dbg(fsm->dev, "erasing chip\n");
stfsm_load_seq(fsm, seq);
stfsm_wait_seq(fsm);
return stfsm_wait_busy(fsm);
}
/*
* Write an address range to the flash chip. Data must be written in
* FLASH_PAGESIZE chunks. The address range may be any size provided
* it is within the physical boundaries.
*/
static int stfsm_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent);
u32 page_offs;
u32 bytes;
uint8_t *b = (uint8_t *)buf;
int ret = 0;
dev_dbg(fsm->dev, "%s to 0x%08x, len %zd\n", __func__, (u32)to, len);
/* Offset within page */
page_offs = to % FLASH_PAGESIZE;
mutex_lock(&fsm->lock);
while (len) {
/* Write up to page boundary */
bytes = min_t(size_t, FLASH_PAGESIZE - page_offs, len);
ret = stfsm_write(fsm, b, bytes, to);
if (ret)
goto out1;
b += bytes;
len -= bytes;
to += bytes;
/* We are now page-aligned */
page_offs = 0;
*retlen += bytes;
}
out1:
mutex_unlock(&fsm->lock);
return ret;
}
/*
* Erase an address range on the flash chip. The address range may span
* one or more erase sectors. Return an error if there is a problem erasing.
*/
static int stfsm_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct stfsm *fsm = dev_get_drvdata(mtd->dev.parent);
u32 addr, len;
int ret;
dev_dbg(fsm->dev, "%s at 0x%llx, len %lld\n", __func__,
(long long)instr->addr, (long long)instr->len);
addr = instr->addr;
len = instr->len;
mutex_lock(&fsm->lock);
/* Whole-chip erase? */
if (len == mtd->size) {
ret = stfsm_erase_chip(fsm);
if (ret)
goto out1;
} else {
while (len) {
ret = stfsm_erase_sector(fsm, addr);
if (ret)
goto out1;
addr += mtd->erasesize;
len -= mtd->erasesize;
}
}
mutex_unlock(&fsm->lock);
return 0;
out1:
mutex_unlock(&fsm->lock);
return ret;
}
static void stfsm_read_jedec(struct stfsm *fsm, uint8_t *jedec)
{
const struct stfsm_seq *seq = &stfsm_seq_read_jedec;
uint32_t tmp[2];
stfsm_load_seq(fsm, seq);
stfsm_read_fifo(fsm, tmp, 8);
memcpy(jedec, tmp, 5);
stfsm_wait_seq(fsm);
}
static struct flash_info *stfsm_jedec_probe(struct stfsm *fsm)
{
struct flash_info *info;
u16 ext_jedec;
u32 jedec;
u8 id[5];
stfsm_read_jedec(fsm, id);
jedec = id[0] << 16 | id[1] << 8 | id[2];
/*
* JEDEC also defines an optional "extended device information"
* string following the vendor-specific data, after the three bytes
* we use here. Supporting some chips might require using it.
*/
ext_jedec = id[3] << 8 | id[4];
dev_dbg(fsm->dev, "JEDEC = 0x%08x [%5ph]\n", jedec, id);
for (info = flash_types; info->name; info++) {
if (info->jedec_id == jedec) {
if (info->ext_id && info->ext_id != ext_jedec)
continue;
return info;
}
}
dev_err(fsm->dev, "Unrecognized JEDEC id %06x\n", jedec);
return NULL;
}
static int stfsm_set_mode(struct stfsm *fsm, uint32_t mode)
{
int ret, timeout = 10;
/* Wait for controller to accept mode change */
while (--timeout) {
ret = readl(fsm->base + SPI_STA_MODE_CHANGE);
if (ret & 0x1)
break;
udelay(1);
}
if (!timeout)
return -EBUSY;
writel(mode, fsm->base + SPI_MODESELECT);
return 0;
}
static void stfsm_set_freq(struct stfsm *fsm, uint32_t spi_freq)
{
uint32_t emi_freq;
uint32_t clk_div;
emi_freq = clk_get_rate(fsm->clk);
/*
* Calculate clk_div - values between 2 and 128
* Multiple of 2, rounded up
*/
clk_div = 2 * DIV_ROUND_UP(emi_freq, 2 * spi_freq);
if (clk_div < 2)
clk_div = 2;
else if (clk_div > 128)
clk_div = 128;
/*
* Determine a suitable delay for the IP to complete a change of
* direction of the FIFO. The required delay is related to the clock
* divider used. The following heuristics are based on empirical tests,
* using a 100MHz EMI clock.
*/
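/*
 * Worked example (assumed rates): with a 100 MHz EMI clock and a 10 MHz SPI
 * frequency, clk_div = 2 * DIV_ROUND_UP(100, 2 * 10) = 10, giving a
 * fifo_dir_delay of 1; at 50 MHz the divider drops to 2 and no delay is used.
 */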
if (clk_div <= 4)
fsm->fifo_dir_delay = 0;
else if (clk_div <= 10)
fsm->fifo_dir_delay = 1;
else
fsm->fifo_dir_delay = DIV_ROUND_UP(clk_div, 10);
dev_dbg(fsm->dev, "emi_clk = %uHZ, spi_freq = %uHZ, clk_div = %u\n",
emi_freq, spi_freq, clk_div);
writel(clk_div, fsm->base + SPI_CLOCKDIV);
}
static int stfsm_init(struct stfsm *fsm)
{
int ret;
/* Perform a soft reset of the FSM controller */
writel(SEQ_CFG_SWRESET, fsm->base + SPI_FAST_SEQ_CFG);
udelay(1);
writel(0, fsm->base + SPI_FAST_SEQ_CFG);
/* Set clock to 'safe' frequency initially */
stfsm_set_freq(fsm, STFSM_FLASH_SAFE_FREQ);
/* Switch to FSM */
ret = stfsm_set_mode(fsm, SPI_MODESELECT_FSM);
if (ret)
return ret;
/* Set timing parameters */
writel(SPI_CFG_DEVICE_ST |
SPI_CFG_DEFAULT_MIN_CS_HIGH |
SPI_CFG_DEFAULT_CS_SETUPHOLD |
SPI_CFG_DEFAULT_DATA_HOLD,
fsm->base + SPI_CONFIGDATA);
writel(STFSM_DEFAULT_WR_TIME, fsm->base + SPI_STATUS_WR_TIME_REG);
/*
* Set the FSM 'WAIT' delay to the minimum workable value. Note, for
* our purposes, the WAIT instruction is used purely to achieve
* "sequence validity" rather than actually implement a delay.
*/
writel(0x00000001, fsm->base + SPI_PROGRAM_ERASE_TIME);
/* Clear FIFO, just in case */
stfsm_clear_fifo(fsm);
return 0;
}
static void stfsm_fetch_platform_configs(struct platform_device *pdev)
{
struct stfsm *fsm = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
struct regmap *regmap;
uint32_t boot_device_reg;
uint32_t boot_device_spi;
uint32_t boot_device; /* Value we read from *boot_device_reg */
int ret;
/* Booting from SPI NOR Flash is the default */
fsm->booted_from_spi = true;
regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
if (IS_ERR(regmap))
goto boot_device_fail;
fsm->reset_signal = of_property_read_bool(np, "st,reset-signal");
fsm->reset_por = of_property_read_bool(np, "st,reset-por");
/* Where in the syscon the boot device information lives */
ret = of_property_read_u32(np, "st,boot-device-reg", &boot_device_reg);
if (ret)
goto boot_device_fail;
/* Boot device value when booted from SPI NOR */
ret = of_property_read_u32(np, "st,boot-device-spi", &boot_device_spi);
if (ret)
goto boot_device_fail;
ret = regmap_read(regmap, boot_device_reg, &boot_device);
if (ret)
goto boot_device_fail;
if (boot_device != boot_device_spi)
fsm->booted_from_spi = false;
return;
boot_device_fail:
dev_warn(&pdev->dev,
"failed to fetch boot device, assuming boot from SPI\n");
}
static int stfsm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct flash_info *info;
struct stfsm *fsm;
int ret;
if (!np) {
dev_err(&pdev->dev, "No DT found\n");
return -EINVAL;
}
fsm = devm_kzalloc(&pdev->dev, sizeof(*fsm), GFP_KERNEL);
if (!fsm)
return -ENOMEM;
fsm->dev = &pdev->dev;
platform_set_drvdata(pdev, fsm);
fsm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fsm->base))
return PTR_ERR(fsm->base);
fsm->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(fsm->clk)) {
dev_err(fsm->dev, "Couldn't find EMI clock.\n");
return PTR_ERR(fsm->clk);
}
mutex_init(&fsm->lock);
ret = stfsm_init(fsm);
if (ret) {
dev_err(&pdev->dev, "Failed to initialise FSM Controller\n");
return ret;
}
stfsm_fetch_platform_configs(pdev);
/* Detect SPI FLASH device */
info = stfsm_jedec_probe(fsm);
if (!info)
return -ENODEV;
fsm->info = info;
/* Use device size to determine address width: above 16 MiB (0x1000000) we need 32-bit addressing */
if (info->sector_size * info->n_sectors > 0x1000000)
info->flags |= FLASH_FLAG_32BIT_ADDR;
/*
* Configure READ/WRITE/ERASE sequences according to platform and
* device flags.
*/
if (info->config)
ret = info->config(fsm);
else
ret = stfsm_prepare_rwe_seqs_default(fsm);
if (ret)
return ret;
fsm->mtd.name = info->name;
fsm->mtd.dev.parent = &pdev->dev;
mtd_set_of_node(&fsm->mtd, np);
fsm->mtd.type = MTD_NORFLASH;
fsm->mtd.writesize = 4;
fsm->mtd.writebufsize = fsm->mtd.writesize;
fsm->mtd.flags = MTD_CAP_NORFLASH;
fsm->mtd.size = info->sector_size * info->n_sectors;
fsm->mtd.erasesize = info->sector_size;
fsm->mtd._read = stfsm_mtd_read;
fsm->mtd._write = stfsm_mtd_write;
fsm->mtd._erase = stfsm_mtd_erase;
dev_info(&pdev->dev,
"Found serial flash device: %s\n"
" size = %llx (%lldMiB) erasesize = 0x%08x (%uKiB)\n",
info->name,
(long long)fsm->mtd.size, (long long)(fsm->mtd.size >> 20),
fsm->mtd.erasesize, (fsm->mtd.erasesize >> 10));
return mtd_device_register(&fsm->mtd, NULL, 0);
}
static int stfsm_remove(struct platform_device *pdev)
{
struct stfsm *fsm = platform_get_drvdata(pdev);
WARN_ON(mtd_device_unregister(&fsm->mtd));
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int stfsmfsm_suspend(struct device *dev)
{
struct stfsm *fsm = dev_get_drvdata(dev);
clk_disable_unprepare(fsm->clk);
return 0;
}
static int stfsmfsm_resume(struct device *dev)
{
struct stfsm *fsm = dev_get_drvdata(dev);
return clk_prepare_enable(fsm->clk);
}
#endif
static SIMPLE_DEV_PM_OPS(stfsm_pm_ops, stfsmfsm_suspend, stfsmfsm_resume);
static const struct of_device_id stfsm_match[] = {
{ .compatible = "st,spi-fsm", },
{},
};
MODULE_DEVICE_TABLE(of, stfsm_match);
static struct platform_driver stfsm_driver = {
.probe = stfsm_probe,
.remove = stfsm_remove,
.driver = {
.name = "st-spi-fsm",
.of_match_table = stfsm_match,
.pm = &stfsm_pm_ops,
},
};
module_platform_driver(stfsm_driver);
MODULE_AUTHOR("Angus Clark <[email protected]>");
MODULE_DESCRIPTION("ST SPI FSM driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/devices/st_spi_fsm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OPAL PNOR flash MTD abstraction
*
* Copyright IBM 2015
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/opal.h>
/*
* This driver creates a Linux MTD abstraction for platform PNOR flash
* backed by OPAL calls
*/
struct powernv_flash {
struct mtd_info mtd;
u32 id;
};
enum flash_op {
FLASH_OP_READ,
FLASH_OP_WRITE,
FLASH_OP_ERASE,
};
/*
* Don't return -ERESTARTSYS if we can't get a token: the MTD core
* might have split up the call from userspace and called into the
* driver more than once, so we'll already have done some amount of work.
*/
static int powernv_flash_async_op(struct mtd_info *mtd, enum flash_op op,
loff_t offset, size_t len, size_t *retlen, u_char *buf)
{
struct powernv_flash *info = (struct powernv_flash *)mtd->priv;
struct device *dev = &mtd->dev;
int token;
struct opal_msg msg;
int rc;
dev_dbg(dev, "%s(op=%d, offset=0x%llx, len=%zu)\n",
__func__, op, offset, len);
token = opal_async_get_token_interruptible();
if (token < 0) {
if (token != -ERESTARTSYS)
dev_err(dev, "Failed to get an async token\n");
else
token = -EINTR;
return token;
}
switch (op) {
case FLASH_OP_READ:
rc = opal_flash_read(info->id, offset, __pa(buf), len, token);
break;
case FLASH_OP_WRITE:
rc = opal_flash_write(info->id, offset, __pa(buf), len, token);
break;
case FLASH_OP_ERASE:
rc = opal_flash_erase(info->id, offset, len, token);
break;
default:
WARN_ON_ONCE(1);
opal_async_release_token(token);
return -EIO;
}
if (rc == OPAL_ASYNC_COMPLETION) {
rc = opal_async_wait_response_interruptible(token, &msg);
if (rc) {
/*
* If we return the mtd core will free the
* buffer we've just passed to OPAL but OPAL
* will continue to read or write from that
* memory.
* It may be tempting to ultimately return 0
* if we're doing a read or a write since we
* are going to end up waiting until OPAL is
* done. However, because the MTD core sends
* us the userspace request in chunks, we need
* it to know we've been interrupted.
*/
rc = -EINTR;
if (opal_async_wait_response(token, &msg))
dev_err(dev, "opal_async_wait_response() failed\n");
goto out;
}
rc = opal_get_async_rc(msg);
}
/*
* OPAL does mutual exclusion on the flash, it will return
* OPAL_BUSY.
* During firmware updates by the service processor OPAL may
* be (temporarily) prevented from accessing the flash, in
* this case OPAL will also return OPAL_BUSY.
* Both cases aren't errors exactly but the flash could have
* changed, userspace should be informed.
*/
if (rc != OPAL_SUCCESS && rc != OPAL_BUSY)
dev_err(dev, "opal_flash_async_op(op=%d) failed (rc %d)\n",
op, rc);
if (rc == OPAL_SUCCESS && retlen)
*retlen = len;
rc = opal_error_code(rc);
out:
opal_async_release_token(token);
return rc;
}
/**
* powernv_flash_read
* @mtd: the device
* @from: the offset to read from
* @len: the number of bytes to read
* @retlen: the number of bytes actually read
* @buf: the filled in buffer
*
* Returns 0 if read successful, or -ERRNO if an error occurred
*/
static int powernv_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
return powernv_flash_async_op(mtd, FLASH_OP_READ, from,
len, retlen, buf);
}
/**
* powernv_flash_write
* @mtd: the device
* @to: the offset to write to
* @len: the number of bytes to write
* @retlen: the number of bytes actually written
* @buf: the buffer to get bytes from
*
* Returns 0 if write successful, -ERRNO if error occurred
*/
static int powernv_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
return powernv_flash_async_op(mtd, FLASH_OP_WRITE, to,
len, retlen, (u_char *)buf);
}
/**
* powernv_flash_erase
* @mtd: the device
* @erase: the erase info
* Returns 0 if erase successful or -ERRNO if an error occurred
*/
static int powernv_flash_erase(struct mtd_info *mtd, struct erase_info *erase)
{
int rc;
rc = powernv_flash_async_op(mtd, FLASH_OP_ERASE, erase->addr,
erase->len, NULL, NULL);
if (rc)
erase->fail_addr = erase->addr;
return rc;
}
/**
 * powernv_flash_set_driver_info - Fill the mtd_info structure
* @dev: The device structure
* @mtd: The structure to fill
*/
static int powernv_flash_set_driver_info(struct device *dev,
struct mtd_info *mtd)
{
u64 size;
u32 erase_size;
int rc;
rc = of_property_read_u32(dev->of_node, "ibm,flash-block-size",
&erase_size);
if (rc) {
dev_err(dev, "couldn't get resource block size information\n");
return rc;
}
rc = of_property_read_u64(dev->of_node, "reg", &size);
if (rc) {
dev_err(dev, "couldn't get resource size information\n");
return rc;
}
/*
	 * TODO: work out which remaining mtd_info details need to be set
	 * here and how to obtain them
*/
mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
mtd->type = MTD_NORFLASH;
mtd->flags = MTD_WRITEABLE;
mtd->size = size;
mtd->erasesize = erase_size;
mtd->writebufsize = mtd->writesize = 1;
mtd->owner = THIS_MODULE;
mtd->_erase = powernv_flash_erase;
mtd->_read = powernv_flash_read;
mtd->_write = powernv_flash_write;
mtd->dev.parent = dev;
mtd_set_of_node(mtd, dev->of_node);
return 0;
}
/**
* powernv_flash_probe
* @pdev: platform device
*
* Returns 0 on success, -ENOMEM, -ENXIO on error
*/
static int powernv_flash_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct powernv_flash *data;
int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->mtd.priv = data;
ret = of_property_read_u32(dev->of_node, "ibm,opal-id", &(data->id));
if (ret) {
dev_err(dev, "no device property 'ibm,opal-id'\n");
return ret;
}
ret = powernv_flash_set_driver_info(dev, &data->mtd);
if (ret)
return ret;
dev_set_drvdata(dev, data);
/*
* The current flash that skiboot exposes is one contiguous flash chip
	 * with an ffs partition at the start; it should prove easier for users
	 * to deal with partitions, or not, as they see fit.
*/
return mtd_device_register(&data->mtd, NULL, 0);
}
/**
 * powernv_flash_release - Release the driver
* @pdev: the platform device
*
* Returns 0
*/
static int powernv_flash_release(struct platform_device *pdev)
{
struct powernv_flash *data = dev_get_drvdata(&(pdev->dev));
/* All resources should be freed automatically */
WARN_ON(mtd_device_unregister(&data->mtd));
return 0;
}
static const struct of_device_id powernv_flash_match[] = {
{ .compatible = "ibm,opal-flash" },
{}
};
static struct platform_driver powernv_flash_driver = {
.driver = {
.name = "powernv_flash",
.of_match_table = powernv_flash_match,
},
.remove = powernv_flash_release,
.probe = powernv_flash_probe,
};
module_platform_driver(powernv_flash_driver);
MODULE_DEVICE_TABLE(of, powernv_flash_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Bur <[email protected]>");
MODULE_DESCRIPTION("MTD abstraction for OPAL flash");
| linux-master | drivers/mtd/devices/powernv_flash.c |
/*
* mtdram - a test mtd device
* Author: Alexander Larsson <[email protected]>
*
* Copyright (c) 1999 Alexander Larsson <[email protected]>
* Copyright (c) 2005 Joern Engel <[email protected]>
*
* This code is GPL
*
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/mtdram.h>
static unsigned long total_size = CONFIG_MTDRAM_TOTAL_SIZE;
static unsigned long erase_size = CONFIG_MTDRAM_ERASE_SIZE;
static unsigned long writebuf_size = 64;
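/*
 * total_size and erase_size module parameters are given in KiB; the
 * macros below convert them to bytes.
 */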
#define MTDRAM_TOTAL_SIZE (total_size * 1024)
#define MTDRAM_ERASE_SIZE (erase_size * 1024)
module_param(total_size, ulong, 0);
MODULE_PARM_DESC(total_size, "Total device size in KiB");
module_param(erase_size, ulong, 0);
MODULE_PARM_DESC(erase_size, "Device erase block size in KiB");
module_param(writebuf_size, ulong, 0);
MODULE_PARM_DESC(writebuf_size, "Device write buf size in Bytes (Default: 64)");
// We could store these in the mtd structure, but we only support 1 device..
static struct mtd_info *mtd_info;
static int check_offs_len(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
int ret = 0;
/* Start address must align on block boundary */
if (mtd_mod_by_eb(ofs, mtd)) {
pr_debug("%s: unaligned address\n", __func__);
ret = -EINVAL;
}
/* Length must align on block boundary */
if (mtd_mod_by_eb(len, mtd)) {
pr_debug("%s: length not block aligned\n", __func__);
ret = -EINVAL;
}
return ret;
}
static int ram_erase(struct mtd_info *mtd, struct erase_info *instr)
{
if (check_offs_len(mtd, instr->addr, instr->len))
return -EINVAL;
memset((char *)mtd->priv + instr->addr, 0xff, instr->len);
return 0;
}
static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
*virt = mtd->priv + from;
*retlen = len;
if (phys) {
/* limit retlen to the number of contiguous physical pages */
unsigned long page_ofs = offset_in_page(*virt);
void *addr = *virt - page_ofs;
unsigned long pfn1, pfn0 = vmalloc_to_pfn(addr);
*phys = __pfn_to_phys(pfn0) + page_ofs;
len += page_ofs;
while (len > PAGE_SIZE) {
len -= PAGE_SIZE;
addr += PAGE_SIZE;
pfn0++;
pfn1 = vmalloc_to_pfn(addr);
if (pfn1 != pfn0) {
*retlen = addr - *virt;
break;
}
}
}
return 0;
}
static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
return 0;
}
static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
memcpy(buf, mtd->priv + from, len);
*retlen = len;
return 0;
}
static int ram_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
memcpy((char *)mtd->priv + to, buf, len);
*retlen = len;
return 0;
}
static void __exit cleanup_mtdram(void)
{
if (mtd_info) {
mtd_device_unregister(mtd_info);
vfree(mtd_info->priv);
kfree(mtd_info);
}
}
int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
unsigned long size, const char *name)
{
memset(mtd, 0, sizeof(*mtd));
/* Setup the MTD structure */
mtd->name = name;
mtd->type = MTD_RAM;
mtd->flags = MTD_CAP_RAM;
mtd->size = size;
mtd->writesize = 1;
mtd->writebufsize = writebuf_size;
mtd->erasesize = MTDRAM_ERASE_SIZE;
mtd->priv = mapped_address;
mtd->owner = THIS_MODULE;
mtd->_erase = ram_erase;
mtd->_point = ram_point;
mtd->_unpoint = ram_unpoint;
mtd->_read = ram_read;
mtd->_write = ram_write;
if (mtd_device_register(mtd, NULL, 0))
return -EIO;
return 0;
}
static int __init init_mtdram(void)
{
void *addr;
int err;
if (!total_size)
return -EINVAL;
/* Allocate some memory */
mtd_info = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
if (!mtd_info)
return -ENOMEM;
addr = vmalloc(MTDRAM_TOTAL_SIZE);
if (!addr) {
kfree(mtd_info);
mtd_info = NULL;
return -ENOMEM;
}
err = mtdram_init_device(mtd_info, addr, MTDRAM_TOTAL_SIZE, "mtdram test device");
if (err) {
vfree(addr);
kfree(mtd_info);
mtd_info = NULL;
return err;
}
memset(mtd_info->priv, 0xff, MTDRAM_TOTAL_SIZE);
return err;
}
module_init(init_mtdram);
module_exit(cleanup_mtdram);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Larsson <[email protected]>");
MODULE_DESCRIPTION("Simulated MTD driver for testing");
| linux-master | drivers/mtd/devices/mtdram.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/mtd/mtd.h>
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
#include "bcm47xxsflash.h"
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serial flash driver for BCMA bus");
static const char * const probes[] = { "bcm47xxpart", NULL };
/**************************************************
* Various helpers
**************************************************/
static void bcm47xxsflash_cmd(struct bcm47xxsflash *b47s, u32 opcode)
{
int i;
b47s->cc_write(b47s, BCMA_CC_FLASHCTL, BCMA_CC_FLASHCTL_START | opcode);
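	/* Busy-wait (up to 1000 polls) for the controller to clear BUSY */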
for (i = 0; i < 1000; i++) {
if (!(b47s->cc_read(b47s, BCMA_CC_FLASHCTL) &
BCMA_CC_FLASHCTL_BUSY))
return;
cpu_relax();
}
pr_err("Control command failed (timeout)!\n");
}
static int bcm47xxsflash_poll(struct bcm47xxsflash *b47s, int timeout)
{
unsigned long deadline = jiffies + timeout;
do {
switch (b47s->type) {
case BCM47XXSFLASH_TYPE_ST:
bcm47xxsflash_cmd(b47s, OPCODE_ST_RDSR);
if (!(b47s->cc_read(b47s, BCMA_CC_FLASHDATA) &
SR_ST_WIP))
return 0;
break;
case BCM47XXSFLASH_TYPE_ATMEL:
bcm47xxsflash_cmd(b47s, OPCODE_AT_STATUS);
if (b47s->cc_read(b47s, BCMA_CC_FLASHDATA) &
SR_AT_READY)
return 0;
break;
}
cpu_relax();
udelay(1);
} while (!time_after_eq(jiffies, deadline));
pr_err("Timeout waiting for flash to be ready!\n");
return -EBUSY;
}
/**************************************************
* MTD ops
**************************************************/
static int bcm47xxsflash_erase(struct mtd_info *mtd, struct erase_info *erase)
{
struct bcm47xxsflash *b47s = mtd->priv;
switch (b47s->type) {
case BCM47XXSFLASH_TYPE_ST:
bcm47xxsflash_cmd(b47s, OPCODE_ST_WREN);
b47s->cc_write(b47s, BCMA_CC_FLASHADDR, erase->addr);
/* Newer flashes have "sub-sectors" which can be erased
* independently with a new command: ST_SSE. The ST_SE command
* erases 64KB just as before.
*/
if (b47s->blocksize < (64 * 1024))
bcm47xxsflash_cmd(b47s, OPCODE_ST_SSE);
else
bcm47xxsflash_cmd(b47s, OPCODE_ST_SE);
break;
case BCM47XXSFLASH_TYPE_ATMEL:
b47s->cc_write(b47s, BCMA_CC_FLASHADDR, erase->addr << 1);
bcm47xxsflash_cmd(b47s, OPCODE_AT_PAGE_ERASE);
break;
}
return bcm47xxsflash_poll(b47s, HZ);
}
static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct bcm47xxsflash *b47s = mtd->priv;
size_t orig_len = len;
/* Check address range */
if ((from + len) > mtd->size)
return -EINVAL;
/* Read as much as possible using fast MMIO window */
if (from < BCM47XXSFLASH_WINDOW_SZ) {
size_t memcpy_len;
memcpy_len = min(len, (size_t)(BCM47XXSFLASH_WINDOW_SZ - from));
memcpy_fromio(buf, b47s->window + from, memcpy_len);
from += memcpy_len;
len -= memcpy_len;
buf += memcpy_len;
}
/* Use indirect access for content out of the window */
for (; len; len--) {
b47s->cc_write(b47s, BCMA_CC_FLASHADDR, from++);
bcm47xxsflash_cmd(b47s, OPCODE_ST_READ4B);
*buf++ = b47s->cc_read(b47s, BCMA_CC_FLASHDATA);
}
*retlen = orig_len;
return orig_len;
}
static int bcm47xxsflash_write_st(struct mtd_info *mtd, u32 offset, size_t len,
const u_char *buf)
{
struct bcm47xxsflash *b47s = mtd->priv;
int written = 0;
/* Enable writes */
bcm47xxsflash_cmd(b47s, OPCODE_ST_WREN);
/* Write first byte */
b47s->cc_write(b47s, BCMA_CC_FLASHADDR, offset);
b47s->cc_write(b47s, BCMA_CC_FLASHDATA, *buf++);
/* Program page */
if (b47s->bcma_cc->core->id.rev < 20) {
bcm47xxsflash_cmd(b47s, OPCODE_ST_PP);
return 1; /* 1B written */
}
/* Program page and set CSA (on newer chips we can continue writing) */
bcm47xxsflash_cmd(b47s, OPCODE_ST_CSA | OPCODE_ST_PP);
offset++;
len--;
written++;
while (len > 0) {
/* Page boundary, another function call is needed */
if ((offset & 0xFF) == 0)
break;
bcm47xxsflash_cmd(b47s, OPCODE_ST_CSA | *buf++);
offset++;
len--;
written++;
}
/* All done, drop CSA & poll */
b47s->cc_write(b47s, BCMA_CC_FLASHCTL, 0);
udelay(1);
if (bcm47xxsflash_poll(b47s, HZ / 10))
pr_err("Flash rejected dropping CSA\n");
return written;
}
static int bcm47xxsflash_write_at(struct mtd_info *mtd, u32 offset, size_t len,
const u_char *buf)
{
struct bcm47xxsflash *b47s = mtd->priv;
u32 mask = b47s->blocksize - 1;
u32 page = (offset & ~mask) << 1;
u32 byte = offset & mask;
int written = 0;
/* If we don't overwrite whole page, read it to the buffer first */
if (byte || (len < b47s->blocksize)) {
int err;
b47s->cc_write(b47s, BCMA_CC_FLASHADDR, page);
bcm47xxsflash_cmd(b47s, OPCODE_AT_BUF1_LOAD);
/* 250 us for AT45DB321B */
err = bcm47xxsflash_poll(b47s, HZ / 1000);
if (err) {
pr_err("Timeout reading page 0x%X info buffer\n", page);
return err;
}
}
/* Change buffer content with our data */
while (len > 0) {
/* Page boundary, another function call is needed */
if (byte == b47s->blocksize)
break;
b47s->cc_write(b47s, BCMA_CC_FLASHADDR, byte++);
b47s->cc_write(b47s, BCMA_CC_FLASHDATA, *buf++);
bcm47xxsflash_cmd(b47s, OPCODE_AT_BUF1_WRITE);
len--;
written++;
}
/* Program page with the buffer content */
b47s->cc_write(b47s, BCMA_CC_FLASHADDR, page);
bcm47xxsflash_cmd(b47s, OPCODE_AT_BUF1_PROGRAM);
return written;
}
static int bcm47xxsflash_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct bcm47xxsflash *b47s = mtd->priv;
int written;
/* Writing functions can return without writing all passed data, for
	 * example when the hardware is too old or when we hit a page boundary.
*/
while (len > 0) {
switch (b47s->type) {
case BCM47XXSFLASH_TYPE_ST:
written = bcm47xxsflash_write_st(mtd, to, len, buf);
break;
case BCM47XXSFLASH_TYPE_ATMEL:
written = bcm47xxsflash_write_at(mtd, to, len, buf);
break;
default:
BUG_ON(1);
}
if (written < 0) {
pr_err("Error writing at offset 0x%llX\n", to);
return written;
}
to += (loff_t)written;
len -= written;
*retlen += written;
buf += written;
}
return 0;
}
static void bcm47xxsflash_fill_mtd(struct bcm47xxsflash *b47s,
struct device *dev)
{
struct mtd_info *mtd = &b47s->mtd;
mtd->priv = b47s;
mtd->dev.parent = dev;
mtd->name = "bcm47xxsflash";
mtd->type = MTD_NORFLASH;
mtd->flags = MTD_CAP_NORFLASH;
mtd->size = b47s->size;
mtd->erasesize = b47s->blocksize;
mtd->writesize = 1;
mtd->writebufsize = 1;
mtd->_erase = bcm47xxsflash_erase;
mtd->_read = bcm47xxsflash_read;
mtd->_write = bcm47xxsflash_write;
}
/**************************************************
* BCMA
**************************************************/
static int bcm47xxsflash_bcma_cc_read(struct bcm47xxsflash *b47s, u16 offset)
{
return bcma_cc_read32(b47s->bcma_cc, offset);
}
static void bcm47xxsflash_bcma_cc_write(struct bcm47xxsflash *b47s, u16 offset,
u32 value)
{
bcma_cc_write32(b47s->bcma_cc, offset, value);
}
static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bcma_sflash *sflash = dev_get_platdata(dev);
struct bcm47xxsflash *b47s;
struct resource *res;
int err;
b47s = devm_kzalloc(dev, sizeof(*b47s), GFP_KERNEL);
if (!b47s)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "invalid resource\n");
return -EINVAL;
}
if (!devm_request_mem_region(dev, res->start, resource_size(res),
res->name)) {
dev_err(dev, "can't request region for resource %pR\n", res);
return -EBUSY;
}
b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash);
b47s->cc_read = bcm47xxsflash_bcma_cc_read;
b47s->cc_write = bcm47xxsflash_bcma_cc_write;
/*
* On old MIPS devices cache was magically invalidated when needed,
* allowing us to use cached access and gain some performance. Trying
* the same on ARM based BCM53573 results in flash corruptions, we need
* to use uncached access for it.
*
* It may be arch specific, but right now there is only 1 ARM SoC using
* this driver, so let's follow Broadcom's reference code and check
* ChipCommon revision.
*/
if (b47s->bcma_cc->core->id.rev == 54)
b47s->window = ioremap(res->start, resource_size(res));
else
b47s->window = ioremap_cache(res->start, resource_size(res));
if (!b47s->window) {
dev_err(dev, "ioremap failed for resource %pR\n", res);
return -ENOMEM;
}
switch (b47s->bcma_cc->capabilities & BCMA_CC_CAP_FLASHT) {
case BCMA_CC_FLASHT_STSER:
b47s->type = BCM47XXSFLASH_TYPE_ST;
break;
case BCMA_CC_FLASHT_ATSER:
b47s->type = BCM47XXSFLASH_TYPE_ATMEL;
break;
}
b47s->blocksize = sflash->blocksize;
b47s->numblocks = sflash->numblocks;
b47s->size = sflash->size;
bcm47xxsflash_fill_mtd(b47s, &pdev->dev);
platform_set_drvdata(pdev, b47s);
err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0);
if (err) {
pr_err("Failed to register MTD device: %d\n", err);
iounmap(b47s->window);
return err;
}
if (bcm47xxsflash_poll(b47s, HZ / 10))
pr_warn("Serial flash busy\n");
return 0;
}
static int bcm47xxsflash_bcma_remove(struct platform_device *pdev)
{
struct bcm47xxsflash *b47s = platform_get_drvdata(pdev);
mtd_device_unregister(&b47s->mtd);
iounmap(b47s->window);
return 0;
}
static struct platform_driver bcma_sflash_driver = {
.probe = bcm47xxsflash_bcma_probe,
.remove = bcm47xxsflash_bcma_remove,
.driver = {
.name = "bcma_sflash",
},
};
/**************************************************
* Init
**************************************************/
module_platform_driver(bcma_sflash_driver);
| linux-master | drivers/mtd/devices/bcm47xxsflash.c |
// SPDX-License-Identifier: GPL-2.0-only
/*======================================================================
This driver provides a method to access memory not used by the kernel
itself (i.e. if the kernel commandline mem=xxx is used). To actually
use slram at least mtdblock or mtdchar is required (for block or
character device access).
Usage:
if compiled as loadable module:
modprobe slram map=<name>,<start>,<end/offset>
if statically linked into the kernel use the following kernel cmd.line
slram=<name>,<start>,<end/offset>
<name>: name of the device that will be listed in /proc/mtd
<start>: start of the memory region, decimal or hex (0xabcdef)
<end/offset>: end of the memory region. It's possible to use +0x1234
to specify the offset instead of the absolute address
NOTE:
With slram it's only possible to map a contiguous memory region. Therefore,
if there's a device mapped somewhere in the specified region, slram will
fail to load (see the kernel log if modprobe fails).
-
Jochen Schaeuble <[email protected]>
======================================================================*/
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#define SLRAM_MAX_DEVICES_PARAMS 6 /* 3 parameters / device */
#define SLRAM_BLK_SZ 0x4000
#define T(fmt, args...) printk(KERN_DEBUG fmt, ## args)
#define E(fmt, args...) printk(KERN_NOTICE fmt, ## args)
typedef struct slram_priv {
u_char *start;
u_char *end;
} slram_priv_t;
typedef struct slram_mtd_list {
struct mtd_info *mtdinfo;
struct slram_mtd_list *next;
} slram_mtd_list_t;
#ifdef MODULE
static char *map[SLRAM_MAX_DEVICES_PARAMS];
module_param_array(map, charp, NULL, 0);
MODULE_PARM_DESC(map, "List of memory regions to map. \"map=<name>, <start>, <length / end>\"");
#else
static char *map;
#endif
static slram_mtd_list_t *slram_mtdlist = NULL;
static int slram_erase(struct mtd_info *, struct erase_info *);
static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, void **,
resource_size_t *);
static int slram_unpoint(struct mtd_info *, loff_t, size_t);
static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int slram_erase(struct mtd_info *mtd, struct erase_info *instr)
{
slram_priv_t *priv = mtd->priv;
memset(priv->start + instr->addr, 0xff, instr->len);
return(0);
}
static int slram_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
slram_priv_t *priv = mtd->priv;
*virt = priv->start + from;
*retlen = len;
return(0);
}
static int slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
return 0;
}
static int slram_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
slram_priv_t *priv = mtd->priv;
memcpy(buf, priv->start + from, len);
*retlen = len;
return(0);
}
static int slram_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
slram_priv_t *priv = mtd->priv;
memcpy(priv->start + to, buf, len);
*retlen = len;
return(0);
}
/*====================================================================*/
static int register_device(char *name, unsigned long start, unsigned long length)
{
slram_mtd_list_t **curmtd;
curmtd = &slram_mtdlist;
while (*curmtd) {
curmtd = &(*curmtd)->next;
}
*curmtd = kmalloc(sizeof(slram_mtd_list_t), GFP_KERNEL);
if (!(*curmtd)) {
E("slram: Cannot allocate new MTD device.\n");
return(-ENOMEM);
}
(*curmtd)->mtdinfo = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
(*curmtd)->next = NULL;
if ((*curmtd)->mtdinfo) {
(*curmtd)->mtdinfo->priv =
kzalloc(sizeof(slram_priv_t), GFP_KERNEL);
if (!(*curmtd)->mtdinfo->priv) {
kfree((*curmtd)->mtdinfo);
(*curmtd)->mtdinfo = NULL;
}
}
if (!(*curmtd)->mtdinfo) {
E("slram: Cannot allocate new MTD device.\n");
return(-ENOMEM);
}
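	/*
	 * Map the region as normal memory; memremap() uses a write-back
	 * mapping if possible and otherwise falls back to write-through or
	 * write-combine.
	 */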
if (!(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start =
memremap(start, length,
MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC))) {
E("slram: memremap failed\n");
return -EIO;
}
((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end =
((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start + length;
(*curmtd)->mtdinfo->name = name;
(*curmtd)->mtdinfo->size = length;
(*curmtd)->mtdinfo->flags = MTD_CAP_RAM;
(*curmtd)->mtdinfo->_erase = slram_erase;
(*curmtd)->mtdinfo->_point = slram_point;
(*curmtd)->mtdinfo->_unpoint = slram_unpoint;
(*curmtd)->mtdinfo->_read = slram_read;
(*curmtd)->mtdinfo->_write = slram_write;
(*curmtd)->mtdinfo->owner = THIS_MODULE;
(*curmtd)->mtdinfo->type = MTD_RAM;
(*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
(*curmtd)->mtdinfo->writesize = 1;
if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0)) {
E("slram: Failed to register new device\n");
memunmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
kfree((*curmtd)->mtdinfo->priv);
kfree((*curmtd)->mtdinfo);
return(-EAGAIN);
}
T("slram: Registered device %s from %luKiB to %luKiB\n", name,
(start / 1024), ((start + length) / 1024));
T("slram: Mapped from 0x%p to 0x%p\n",
((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start,
((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end);
return(0);
}
static void unregister_devices(void)
{
slram_mtd_list_t *nextitem;
while (slram_mtdlist) {
nextitem = slram_mtdlist->next;
mtd_device_unregister(slram_mtdlist->mtdinfo);
memunmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
kfree(slram_mtdlist->mtdinfo->priv);
kfree(slram_mtdlist->mtdinfo);
kfree(slram_mtdlist);
slram_mtdlist = nextitem;
}
}
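/*
 * handle_unit() - scale a number parsed from the command line by an
 * optional 'K'/'M' (KiB/MiB) suffix left behind by simple_strtoul().
 */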
static unsigned long handle_unit(unsigned long value, char *unit)
{
if ((*unit == 'M') || (*unit == 'm')) {
return(value * 1024 * 1024);
} else if ((*unit == 'K') || (*unit == 'k')) {
return(value * 1024);
}
return(value);
}
static int parse_cmdline(char *devname, char *szstart, char *szlength)
{
char *buffer;
unsigned long devstart;
unsigned long devlength;
if ((!devname) || (!szstart) || (!szlength)) {
unregister_devices();
return(-EINVAL);
}
devstart = simple_strtoul(szstart, &buffer, 0);
devstart = handle_unit(devstart, buffer);
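	/*
	 * Without a leading '+' the third parameter is an absolute end
	 * address; with '+' it is a length relative to the start.
	 */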
if (*(szlength) != '+') {
devlength = simple_strtoul(szlength, &buffer, 0);
devlength = handle_unit(devlength, buffer);
if (devlength < devstart)
goto err_out;
devlength -= devstart;
} else {
devlength = simple_strtoul(szlength + 1, &buffer, 0);
devlength = handle_unit(devlength, buffer);
}
T("slram: devname=%s, devstart=0x%lx, devlength=0x%lx\n",
devname, devstart, devlength);
if (devlength % SLRAM_BLK_SZ != 0)
goto err_out;
if ((devstart = register_device(devname, devstart, devlength))){
unregister_devices();
return((int)devstart);
}
return(0);
err_out:
E("slram: Illegal length parameter.\n");
return(-EINVAL);
}
#ifndef MODULE
static int __init mtd_slram_setup(char *str)
{
map = str;
return(1);
}
__setup("slram=", mtd_slram_setup);
#endif
static int __init init_slram(void)
{
char *devname;
#ifndef MODULE
char *devstart;
char *devlength;
if (!map) {
E("slram: not enough parameters.\n");
return(-EINVAL);
}
while (map) {
devname = devstart = devlength = NULL;
if (!(devname = strsep(&map, ","))) {
E("slram: No devicename specified.\n");
break;
}
T("slram: devname = %s\n", devname);
if ((!map) || (!(devstart = strsep(&map, ",")))) {
E("slram: No devicestart specified.\n");
}
T("slram: devstart = %s\n", devstart);
if ((!map) || (!(devlength = strsep(&map, ",")))) {
E("slram: No devicelength / -end specified.\n");
}
T("slram: devlength = %s\n", devlength);
if (parse_cmdline(devname, devstart, devlength) != 0) {
return(-EINVAL);
}
}
#else
int count;
int i;
for (count = 0; count < SLRAM_MAX_DEVICES_PARAMS && map[count];
count++) {
}
if ((count % 3 != 0) || (count == 0)) {
E("slram: not enough parameters.\n");
return(-EINVAL);
}
for (i = 0; i < (count / 3); i++) {
devname = map[i * 3];
if (parse_cmdline(devname, map[i * 3 + 1], map[i * 3 + 2])!=0) {
return(-EINVAL);
}
}
#endif /* !MODULE */
return(0);
}
static void __exit cleanup_slram(void)
{
unregister_devices();
}
module_init(init_slram);
module_exit(cleanup_slram);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jochen Schaeuble <[email protected]>");
MODULE_DESCRIPTION("MTD driver for uncached system RAM");
| linux-master | drivers/mtd/devices/slram.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Atmel AT45xxx DataFlash MTD driver for lightweight SPI framework
*
* Largely derived from at91_dataflash.c:
* Copyright (C) 2003-2005 SAN People (Pty) Ltd
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
/*
* DataFlash is a kind of SPI flash. Most AT45 chips have two buffers in
* each chip, which may be used for double buffered I/O; but this driver
* doesn't (yet) use these for any kind of i/o overlap or prefetching.
*
* Sometimes DataFlash is packaged in MMC-format cards, although the
* MMC stack can't (yet?) distinguish between MMC and DataFlash
* protocols during enumeration.
*/
/* reads can bypass the buffers */
#define OP_READ_CONTINUOUS 0xE8
#define OP_READ_PAGE 0xD2
/* group B requests can run even while status reports "busy" */
#define OP_READ_STATUS 0xD7 /* group B */
/* move data between host and buffer */
#define OP_READ_BUFFER1 0xD4 /* group B */
#define OP_READ_BUFFER2 0xD6 /* group B */
#define OP_WRITE_BUFFER1 0x84 /* group B */
#define OP_WRITE_BUFFER2 0x87 /* group B */
/* erasing flash */
#define OP_ERASE_PAGE 0x81
#define OP_ERASE_BLOCK 0x50
/* move data between buffer and flash */
#define OP_TRANSFER_BUF1 0x53
#define OP_TRANSFER_BUF2 0x55
#define OP_MREAD_BUFFER1 0xD4
#define OP_MREAD_BUFFER2 0xD6
#define OP_MWERASE_BUFFER1 0x83
#define OP_MWERASE_BUFFER2 0x86
#define OP_MWRITE_BUFFER1 0x88 /* sector must be pre-erased */
#define OP_MWRITE_BUFFER2 0x89 /* sector must be pre-erased */
/* write to buffer, then write-erase to flash */
#define OP_PROGRAM_VIA_BUF1 0x82
#define OP_PROGRAM_VIA_BUF2 0x85
/* compare buffer to flash */
#define OP_COMPARE_BUF1 0x60
#define OP_COMPARE_BUF2 0x61
/* read flash to buffer, then write-erase to flash */
#define OP_REWRITE_VIA_BUF1 0x58
#define OP_REWRITE_VIA_BUF2 0x59
/* newer chips report JEDEC manufacturer and device IDs; chip
* serial number and OTP bits; and per-sector writeprotect.
*/
#define OP_READ_ID 0x9F
#define OP_READ_SECURITY 0x77
#define OP_WRITE_SECURITY_REVC 0x9A
#define OP_WRITE_SECURITY 0x9B /* revision D */
#define CFI_MFR_ATMEL 0x1F
#define DATAFLASH_SHIFT_EXTID 24
#define DATAFLASH_SHIFT_ID 40
struct dataflash {
u8 command[4];
char name[24];
unsigned short page_offset; /* offset in flash address */
unsigned int page_size; /* of bytes per page */
struct mutex lock;
struct spi_device *spi;
struct mtd_info mtd;
};
#ifdef CONFIG_OF
static const struct of_device_id dataflash_dt_ids[] = {
{ .compatible = "atmel,at45", },
{ .compatible = "atmel,dataflash", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, dataflash_dt_ids);
#endif
static const struct spi_device_id dataflash_spi_ids[] = {
{ .name = "at45", },
{ .name = "dataflash", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, dataflash_spi_ids);
/* ......................................................................... */
/*
* Return the status of the DataFlash device.
*/
static inline int dataflash_status(struct spi_device *spi)
{
/* NOTE: at45db321c over 25 MHz wants to write
* a dummy byte after the opcode...
*/
return spi_w8r8(spi, OP_READ_STATUS);
}
/*
* Poll the DataFlash device until it is READY.
* This usually takes 5-20 msec or so; more for sector erase.
*/
static int dataflash_waitready(struct spi_device *spi)
{
int status;
for (;;) {
status = dataflash_status(spi);
if (status < 0) {
dev_dbg(&spi->dev, "status %d?\n", status);
status = 0;
}
if (status & (1 << 7)) /* RDY/nBSY */
return status;
usleep_range(3000, 4000);
}
}
/* ......................................................................... */
/*
* Erase pages of flash.
*/
static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct dataflash *priv = mtd->priv;
struct spi_device *spi = priv->spi;
struct spi_transfer x = { };
struct spi_message msg;
unsigned blocksize = priv->page_size << 3;
u8 *command;
u32 rem;
dev_dbg(&spi->dev, "erase addr=0x%llx len 0x%llx\n",
(long long)instr->addr, (long long)instr->len);
div_u64_rem(instr->len, priv->page_size, &rem);
if (rem)
return -EINVAL;
div_u64_rem(instr->addr, priv->page_size, &rem);
if (rem)
return -EINVAL;
spi_message_init(&msg);
x.tx_buf = command = priv->command;
x.len = 4;
spi_message_add_tail(&x, &msg);
mutex_lock(&priv->lock);
while (instr->len > 0) {
unsigned int pageaddr;
int status;
int do_block;
/* Calculate flash page address; use block erase (for speed) if
* we're at a block boundary and need to erase the whole block.
*/
pageaddr = div_u64(instr->addr, priv->page_size);
do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize;
pageaddr = pageaddr << priv->page_offset;
command[0] = do_block ? OP_ERASE_BLOCK : OP_ERASE_PAGE;
command[1] = (u8)(pageaddr >> 16);
command[2] = (u8)(pageaddr >> 8);
command[3] = 0;
dev_dbg(&spi->dev, "ERASE %s: (%x) %x %x %x [%i]\n",
do_block ? "block" : "page",
command[0], command[1], command[2], command[3],
pageaddr);
status = spi_sync(spi, &msg);
(void) dataflash_waitready(spi);
if (status < 0) {
dev_err(&spi->dev, "erase %x, err %d\n",
pageaddr, status);
/* REVISIT: can retry instr->retries times; or
* giveup and instr->fail_addr = instr->addr;
*/
continue;
}
if (do_block) {
instr->addr += blocksize;
instr->len -= blocksize;
} else {
instr->addr += priv->page_size;
instr->len -= priv->page_size;
}
}
mutex_unlock(&priv->lock);
return 0;
}
/*
* Read from the DataFlash device.
* from : Start offset in flash device
* len : Amount to read
 * retlen : Amount of data actually read
* buf : Buffer containing the data
*/
static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct dataflash *priv = mtd->priv;
struct spi_transfer x[2] = { };
struct spi_message msg;
unsigned int addr;
u8 *command;
int status;
dev_dbg(&priv->spi->dev, "read 0x%x..0x%x\n",
(unsigned int)from, (unsigned int)(from + len));
/* Calculate flash page/byte address */
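	/*
	 * The page number sits in the high bits, shifted left by page_offset;
	 * the byte offset within the page fills the low bits.
	 */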
addr = (((unsigned)from / priv->page_size) << priv->page_offset)
+ ((unsigned)from % priv->page_size);
command = priv->command;
dev_dbg(&priv->spi->dev, "READ: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
spi_message_init(&msg);
x[0].tx_buf = command;
x[0].len = 8;
spi_message_add_tail(&x[0], &msg);
x[1].rx_buf = buf;
x[1].len = len;
spi_message_add_tail(&x[1], &msg);
mutex_lock(&priv->lock);
/* Continuous read, max clock = f(car) which may be less than
* the peak rate available. Some chips support commands with
* fewer "don't care" bytes. Both buffers stay unchanged.
*/
command[0] = OP_READ_CONTINUOUS;
command[1] = (u8)(addr >> 16);
command[2] = (u8)(addr >> 8);
command[3] = (u8)(addr >> 0);
/* plus 4 "don't care" bytes */
status = spi_sync(priv->spi, &msg);
mutex_unlock(&priv->lock);
if (status >= 0) {
*retlen = msg.actual_length - 8;
status = 0;
} else
dev_dbg(&priv->spi->dev, "read %x..%x --> %d\n",
(unsigned)from, (unsigned)(from + len),
status);
return status;
}
/*
* Write to the DataFlash device.
* to : Start offset in flash device
* len : Amount to write
* retlen : Amount of data actually written
* buf : Buffer containing the data
*/
static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t * retlen, const u_char * buf)
{
struct dataflash *priv = mtd->priv;
struct spi_device *spi = priv->spi;
struct spi_transfer x[2] = { };
struct spi_message msg;
unsigned int pageaddr, addr, offset, writelen;
size_t remaining = len;
u_char *writebuf = (u_char *) buf;
int status = -EINVAL;
u8 *command;
dev_dbg(&spi->dev, "write 0x%x..0x%x\n",
(unsigned int)to, (unsigned int)(to + len));
spi_message_init(&msg);
x[0].tx_buf = command = priv->command;
x[0].len = 4;
spi_message_add_tail(&x[0], &msg);
pageaddr = ((unsigned)to / priv->page_size);
offset = ((unsigned)to % priv->page_size);
if (offset + len > priv->page_size)
writelen = priv->page_size - offset;
else
writelen = len;
mutex_lock(&priv->lock);
while (remaining > 0) {
dev_dbg(&spi->dev, "write @ %i:%i len=%i\n",
pageaddr, offset, writelen);
/* REVISIT:
* (a) each page in a sector must be rewritten at least
* once every 10K sibling erase/program operations.
* (b) for pages that are already erased, we could
* use WRITE+MWRITE not PROGRAM for ~30% speedup.
* (c) WRITE to buffer could be done while waiting for
* a previous MWRITE/MWERASE to complete ...
* (d) error handling here seems to be mostly missing.
*
* Two persistent bits per page, plus a per-sector counter,
* could support (a) and (b) ... we might consider using
* the second half of sector zero, which is just one block,
* to track that state. (On AT91, that sector should also
* support boot-from-DataFlash.)
*/
addr = pageaddr << priv->page_offset;
/* (1) Maybe transfer partial page to Buffer1 */
if (writelen != priv->page_size) {
command[0] = OP_TRANSFER_BUF1;
command[1] = (addr & 0x00FF0000) >> 16;
command[2] = (addr & 0x0000FF00) >> 8;
command[3] = 0;
dev_dbg(&spi->dev, "TRANSFER: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
status = spi_sync(spi, &msg);
if (status < 0)
dev_dbg(&spi->dev, "xfer %u -> %d\n",
addr, status);
(void) dataflash_waitready(priv->spi);
}
/* (2) Program full page via Buffer1 */
addr += offset;
command[0] = OP_PROGRAM_VIA_BUF1;
command[1] = (addr & 0x00FF0000) >> 16;
command[2] = (addr & 0x0000FF00) >> 8;
command[3] = (addr & 0x000000FF);
dev_dbg(&spi->dev, "PROGRAM: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
x[1].tx_buf = writebuf;
x[1].len = writelen;
spi_message_add_tail(x + 1, &msg);
status = spi_sync(spi, &msg);
spi_transfer_del(x + 1);
if (status < 0)
dev_dbg(&spi->dev, "pgm %u/%u -> %d\n",
addr, writelen, status);
(void) dataflash_waitready(priv->spi);
#ifdef CONFIG_MTD_DATAFLASH_WRITE_VERIFY
/* (3) Compare to Buffer1 */
addr = pageaddr << priv->page_offset;
command[0] = OP_COMPARE_BUF1;
command[1] = (addr & 0x00FF0000) >> 16;
command[2] = (addr & 0x0000FF00) >> 8;
command[3] = 0;
dev_dbg(&spi->dev, "COMPARE: (%x) %x %x %x\n",
command[0], command[1], command[2], command[3]);
status = spi_sync(spi, &msg);
if (status < 0)
dev_dbg(&spi->dev, "compare %u -> %d\n",
addr, status);
status = dataflash_waitready(priv->spi);
/* Check result of the compare operation */
if (status & (1 << 6)) {
dev_err(&spi->dev, "compare page %u, err %d\n",
pageaddr, status);
remaining = 0;
status = -EIO;
break;
} else
status = 0;
#endif /* CONFIG_MTD_DATAFLASH_WRITE_VERIFY */
remaining = remaining - writelen;
pageaddr++;
offset = 0;
writebuf += writelen;
*retlen += writelen;
if (remaining > priv->page_size)
writelen = priv->page_size;
else
writelen = remaining;
}
mutex_unlock(&priv->lock);
return status;
}
/* ......................................................................... */
#ifdef CONFIG_MTD_DATAFLASH_OTP
static int dataflash_get_otp_info(struct mtd_info *mtd, size_t len,
size_t *retlen, struct otp_info *info)
{
/* Report both blocks as identical: bytes 0..64, locked.
* Unless the user block changed from all-ones, we can't
* tell whether it's still writable; so we assume it isn't.
*/
info->start = 0;
info->length = 64;
info->locked = 1;
*retlen = sizeof(*info);
return 0;
}
static ssize_t otp_read(struct spi_device *spi, unsigned base,
u8 *buf, loff_t off, size_t len)
{
struct spi_message m;
size_t l;
u8 *scratch;
struct spi_transfer t;
int status;
if (off > 64)
return -EINVAL;
if ((off + len) > 64)
len = 64 - off;
spi_message_init(&m);
l = 4 + base + off + len;
scratch = kzalloc(l, GFP_KERNEL);
if (!scratch)
return -ENOMEM;
/* OUT: OP_READ_SECURITY, 3 don't-care bytes, zeroes
* IN: ignore 4 bytes, data bytes 0..N (max 127)
*/
scratch[0] = OP_READ_SECURITY;
memset(&t, 0, sizeof t);
t.tx_buf = scratch;
t.rx_buf = scratch;
t.len = l;
spi_message_add_tail(&t, &m);
dataflash_waitready(spi);
status = spi_sync(spi, &m);
if (status >= 0) {
memcpy(buf, scratch + 4 + base + off, len);
status = len;
}
kfree(scratch);
return status;
}
static int dataflash_read_fact_otp(struct mtd_info *mtd,
loff_t from, size_t len, size_t *retlen, u_char *buf)
{
struct dataflash *priv = mtd->priv;
int status;
/* 64 bytes, from 0..63 ... start at 64 on-chip */
mutex_lock(&priv->lock);
status = otp_read(priv->spi, 64, buf, from, len);
mutex_unlock(&priv->lock);
if (status < 0)
return status;
*retlen = status;
return 0;
}
static int dataflash_read_user_otp(struct mtd_info *mtd,
loff_t from, size_t len, size_t *retlen, u_char *buf)
{
struct dataflash *priv = mtd->priv;
int status;
/* 64 bytes, from 0..63 ... start at 0 on-chip */
mutex_lock(&priv->lock);
status = otp_read(priv->spi, 0, buf, from, len);
mutex_unlock(&priv->lock);
if (status < 0)
return status;
*retlen = status;
return 0;
}
static int dataflash_write_user_otp(struct mtd_info *mtd,
loff_t from, size_t len, size_t *retlen, const u_char *buf)
{
struct spi_message m;
const size_t l = 4 + 64;
u8 *scratch;
struct spi_transfer t;
struct dataflash *priv = mtd->priv;
int status;
if (from >= 64) {
/*
* Attempting to write beyond the end of OTP memory,
* no data can be written.
*/
*retlen = 0;
return 0;
}
/* Truncate the write to fit into OTP memory. */
if ((from + len) > 64)
len = 64 - from;
/* OUT: OP_WRITE_SECURITY, 3 zeroes, 64 data-or-zero bytes
* IN: ignore all
*/
scratch = kzalloc(l, GFP_KERNEL);
if (!scratch)
return -ENOMEM;
scratch[0] = OP_WRITE_SECURITY;
memcpy(scratch + 4 + from, buf, len);
spi_message_init(&m);
memset(&t, 0, sizeof t);
t.tx_buf = scratch;
t.len = l;
spi_message_add_tail(&t, &m);
/* Write the OTP bits, if they've not yet been written.
* This modifies SRAM buffer1.
*/
mutex_lock(&priv->lock);
dataflash_waitready(priv->spi);
status = spi_sync(priv->spi, &m);
mutex_unlock(&priv->lock);
kfree(scratch);
if (status >= 0) {
status = 0;
*retlen = len;
}
return status;
}
static char *otp_setup(struct mtd_info *device, char revision)
{
device->_get_fact_prot_info = dataflash_get_otp_info;
device->_read_fact_prot_reg = dataflash_read_fact_otp;
device->_get_user_prot_info = dataflash_get_otp_info;
device->_read_user_prot_reg = dataflash_read_user_otp;
/* rev c parts (at45db321c and at45db1281 only!) use a
* different write procedure; not (yet?) implemented.
*/
if (revision > 'c')
device->_write_user_prot_reg = dataflash_write_user_otp;
return ", OTP";
}
#else
static char *otp_setup(struct mtd_info *device, char revision)
{
return " (OTP)";
}
#endif
/* ......................................................................... */
/*
* Register DataFlash device with MTD subsystem.
*/
static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
int pagesize, int pageoffset, char revision)
{
struct dataflash *priv;
struct mtd_info *device;
struct flash_platform_data *pdata = dev_get_platdata(&spi->dev);
char *otp_tag = "";
int err = 0;
priv = kzalloc(sizeof *priv, GFP_KERNEL);
if (!priv)
return -ENOMEM;
mutex_init(&priv->lock);
priv->spi = spi;
priv->page_size = pagesize;
priv->page_offset = pageoffset;
/* name must be usable with cmdlinepart */
sprintf(priv->name, "spi%d.%d-%s",
spi->master->bus_num, spi_get_chipselect(spi, 0),
name);
device = &priv->mtd;
device->name = (pdata && pdata->name) ? pdata->name : priv->name;
device->size = nr_pages * pagesize;
device->erasesize = pagesize;
device->writesize = pagesize;
device->type = MTD_DATAFLASH;
device->flags = MTD_WRITEABLE;
device->_erase = dataflash_erase;
device->_read = dataflash_read;
device->_write = dataflash_write;
device->priv = priv;
device->dev.parent = &spi->dev;
mtd_set_of_node(device, spi->dev.of_node);
if (revision >= 'c')
otp_tag = otp_setup(device, revision);
dev_info(&spi->dev, "%s (%lld KBytes) pagesize %d bytes%s\n",
name, (long long)((device->size + 1023) >> 10),
pagesize, otp_tag);
spi_set_drvdata(spi, priv);
err = mtd_device_register(device,
pdata ? pdata->parts : NULL,
pdata ? pdata->nr_parts : 0);
if (!err)
return 0;
kfree(priv);
return err;
}
static inline int add_dataflash(struct spi_device *spi, char *name,
int nr_pages, int pagesize, int pageoffset)
{
return add_dataflash_otp(spi, name, nr_pages, pagesize,
pageoffset, 0);
}
struct flash_info {
char *name;
/* JEDEC id has a high byte of zero plus three data bytes:
* the manufacturer id, then a two byte device id.
*/
u64 jedec_id;
/* The size listed here is what works with OP_ERASE_PAGE. */
unsigned nr_pages;
u16 pagesize;
u16 pageoffset;
u16 flags;
#define SUP_EXTID 0x0004 /* supports extended ID data */
#define SUP_POW2PS 0x0002 /* supports 2^N byte pages */
#define IS_POW2PS 0x0001 /* uses 2^N byte pages */
};
static struct flash_info dataflash_data[] = {
/*
* NOTE: chips with SUP_POW2PS (rev D and up) need two entries,
* one with IS_POW2PS and the other without. The entry with the
* non-2^N byte page size can't name exact chip revisions without
* losing backwards compatibility for cmdlinepart.
*
* These newer chips also support 128-byte security registers (with
* 64 bytes one-time-programmable) and software write-protection.
*/
{ "AT45DB011B", 0x1f2200, 512, 264, 9, SUP_POW2PS},
{ "at45db011d", 0x1f2200, 512, 256, 8, SUP_POW2PS | IS_POW2PS},
{ "AT45DB021B", 0x1f2300, 1024, 264, 9, SUP_POW2PS},
{ "at45db021d", 0x1f2300, 1024, 256, 8, SUP_POW2PS | IS_POW2PS},
{ "AT45DB041x", 0x1f2400, 2048, 264, 9, SUP_POW2PS},
{ "at45db041d", 0x1f2400, 2048, 256, 8, SUP_POW2PS | IS_POW2PS},
{ "AT45DB081B", 0x1f2500, 4096, 264, 9, SUP_POW2PS},
{ "at45db081d", 0x1f2500, 4096, 256, 8, SUP_POW2PS | IS_POW2PS},
{ "AT45DB161x", 0x1f2600, 4096, 528, 10, SUP_POW2PS},
{ "at45db161d", 0x1f2600, 4096, 512, 9, SUP_POW2PS | IS_POW2PS},
{ "AT45DB321x", 0x1f2700, 8192, 528, 10, 0}, /* rev C */
{ "AT45DB321x", 0x1f2701, 8192, 528, 10, SUP_POW2PS},
{ "at45db321d", 0x1f2701, 8192, 512, 9, SUP_POW2PS | IS_POW2PS},
{ "AT45DB642x", 0x1f2800, 8192, 1056, 11, SUP_POW2PS},
{ "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
{ "AT45DB641E", 0x1f28000100ULL, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
{ "at45db641e", 0x1f28000100ULL, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
};
static struct flash_info *jedec_lookup(struct spi_device *spi,
u64 jedec, bool use_extid)
{
struct flash_info *info;
int status;
for (info = dataflash_data;
info < dataflash_data + ARRAY_SIZE(dataflash_data);
info++) {
if (use_extid && !(info->flags & SUP_EXTID))
continue;
if (info->jedec_id == jedec) {
dev_dbg(&spi->dev, "OTP, sector protect%s\n",
(info->flags & SUP_POW2PS) ?
", binary pagesize" : "");
if (info->flags & SUP_POW2PS) {
status = dataflash_status(spi);
if (status < 0) {
dev_dbg(&spi->dev, "status error %d\n",
status);
return ERR_PTR(status);
}
if (status & 0x1) {
if (info->flags & IS_POW2PS)
return info;
} else {
if (!(info->flags & IS_POW2PS))
return info;
}
} else
return info;
}
}
return ERR_PTR(-ENODEV);
}
static struct flash_info *jedec_probe(struct spi_device *spi)
{
int ret;
u8 code = OP_READ_ID;
u64 jedec;
u8 id[sizeof(jedec)] = {0};
const unsigned int id_size = 5;
struct flash_info *info;
/*
	 * JEDEC also defines an optional "extended device information"
	 * string that follows the vendor-specific data, after the three
	 * bytes we use here. Supporting some chips might require using it.
*
* If the vendor ID isn't Atmel's (0x1f), assume this call failed.
* That's not an error; only rev C and newer chips handle it, and
* only Atmel sells these chips.
*/
ret = spi_write_then_read(spi, &code, 1, id, id_size);
if (ret < 0) {
dev_dbg(&spi->dev, "error %d reading JEDEC ID\n", ret);
return ERR_PTR(ret);
}
if (id[0] != CFI_MFR_ATMEL)
return NULL;
jedec = be64_to_cpup((__be64 *)id);
/*
* First, try to match device using extended device
* information
*/
info = jedec_lookup(spi, jedec >> DATAFLASH_SHIFT_EXTID, true);
if (!IS_ERR(info))
return info;
/*
* If that fails, make another pass using regular ID
* information
*/
info = jedec_lookup(spi, jedec >> DATAFLASH_SHIFT_ID, false);
if (!IS_ERR(info))
return info;
/*
* Treat other chips as errors ... we won't know the right page
* size (it might be binary) even when we can tell which density
* class is involved (legacy chip id scheme).
*/
dev_warn(&spi->dev, "JEDEC id %016llx not handled\n", jedec);
return ERR_PTR(-ENODEV);
}
/*
* Detect and initialize DataFlash device, using JEDEC IDs on newer chips
* or else the ID code embedded in the status bits:
*
* Device Density ID code #Pages PageSize Offset
* AT45DB011B 1Mbit (128K) xx0011xx (0x0c) 512 264 9
* AT45DB021B 2Mbit (256K) xx0101xx (0x14) 1024 264 9
* AT45DB041B 4Mbit (512K) xx0111xx (0x1c) 2048 264 9
* AT45DB081B 8Mbit (1M) xx1001xx (0x24) 4096 264 9
* AT45DB0161B 16Mbit (2M) xx1011xx (0x2c) 4096 528 10
* AT45DB0321B 32Mbit (4M) xx1101xx (0x34) 8192 528 10
* AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11
* AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11
*/
static int dataflash_probe(struct spi_device *spi)
{
int status;
struct flash_info *info;
/*
* Try to detect dataflash by JEDEC ID.
* If it succeeds we know we have either a C or D part.
* D will support power of 2 pagesize option.
* Both support the security register, though with different
* write procedures.
*/
info = jedec_probe(spi);
if (IS_ERR(info))
return PTR_ERR(info);
if (info != NULL)
return add_dataflash_otp(spi, info->name, info->nr_pages,
info->pagesize, info->pageoffset,
(info->flags & SUP_POW2PS) ? 'd' : 'c');
/*
	 * Older chips support only legacy commands, identifying
* capacity using bits in the status byte.
*/
status = dataflash_status(spi);
if (status <= 0 || status == 0xff) {
dev_dbg(&spi->dev, "status error %d\n", status);
if (status == 0 || status == 0xff)
status = -ENODEV;
return status;
}
/* if there's a device there, assume it's dataflash.
	 * board setup should have set spi->max_speed_hz to
* match f(car) for continuous reads, mode 0 or 3.
*/
switch (status & 0x3c) {
case 0x0c: /* 0 0 1 1 x x */
status = add_dataflash(spi, "AT45DB011B", 512, 264, 9);
break;
case 0x14: /* 0 1 0 1 x x */
status = add_dataflash(spi, "AT45DB021B", 1024, 264, 9);
break;
case 0x1c: /* 0 1 1 1 x x */
status = add_dataflash(spi, "AT45DB041x", 2048, 264, 9);
break;
case 0x24: /* 1 0 0 1 x x */
status = add_dataflash(spi, "AT45DB081B", 4096, 264, 9);
break;
case 0x2c: /* 1 0 1 1 x x */
status = add_dataflash(spi, "AT45DB161x", 4096, 528, 10);
break;
case 0x34: /* 1 1 0 1 x x */
status = add_dataflash(spi, "AT45DB321x", 8192, 528, 10);
break;
case 0x38: /* 1 1 1 x x x */
case 0x3c:
status = add_dataflash(spi, "AT45DB642x", 8192, 1056, 11);
break;
/* obsolete AT45DB1282 not (yet?) supported */
default:
dev_info(&spi->dev, "unsupported device (%x)\n",
status & 0x3c);
status = -ENODEV;
}
if (status < 0)
dev_dbg(&spi->dev, "add_dataflash --> %d\n", status);
return status;
}
static void dataflash_remove(struct spi_device *spi)
{
struct dataflash *flash = spi_get_drvdata(spi);
dev_dbg(&spi->dev, "remove\n");
WARN_ON(mtd_device_unregister(&flash->mtd));
kfree(flash);
}
static struct spi_driver dataflash_driver = {
.driver = {
.name = "mtd_dataflash",
.of_match_table = of_match_ptr(dataflash_dt_ids),
},
.probe = dataflash_probe,
.remove = dataflash_remove,
.id_table = dataflash_spi_ids,
/* FIXME: investigate suspend and resume... */
};
module_spi_driver(dataflash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andrew Victor, David Brownell");
MODULE_DESCRIPTION("MTD DataFlash driver");
MODULE_ALIAS("spi:mtd_dataflash");
| linux-master | drivers/mtd/devices/mtd_dataflash.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Handles the M-Systems DiskOnChip G3 chip
*
* Copyright (C) 2011 Robert Jarzmik
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/bitmap.h>
#include <linux/bitrev.h>
#include <linux/bch.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#define CREATE_TRACE_POINTS
#include "docg3.h"
/*
* This driver handles the DiskOnChip G3 flash memory.
*
 * As no specification is available from M-Systems/Sandisk, this driver lacks
 * several functions available on the chip, such as:
 * - IPL write
 *
 * The bus data width (8 bits versus 16 bits) is not handled (if_cfg flag), and
 * the driver assumes a 16-bit data bus.
*
* DocG3 relies on 2 ECC algorithms, which are handled in hardware :
* - a 1 byte Hamming code stored in the OOB for each page
* - a 7 bytes BCH code stored in the OOB for each page
* The BCH ECC is :
* - BCH is in GF(2^14)
* - BCH is over data of 520 bytes (512 page + 7 page_info bytes
* + 1 hamming byte)
* - BCH can correct up to 4 bits (t = 4)
 * - BCH syndromes are calculated in hardware, and checked in hardware as well
*
*/
static unsigned int reliable_mode;
module_param(reliable_mode, uint, 0);
MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, "
"2=reliable) : MLC normal operations are in normal mode");
static int docg3_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section)
return -ERANGE;
/* byte 7 is Hamming ECC, byte 8-14 are BCH ECC */
oobregion->offset = 7;
oobregion->length = 8;
return 0;
}
static int docg3_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section > 1)
return -ERANGE;
/* free bytes: byte 0 until byte 6, byte 15 */
if (!section) {
oobregion->offset = 0;
oobregion->length = 7;
} else {
oobregion->offset = 15;
oobregion->length = 1;
}
return 0;
}
static const struct mtd_ooblayout_ops nand_ooblayout_docg3_ops = {
.ecc = docg3_ooblayout_ecc,
.free = docg3_ooblayout_free,
};
static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
{
u8 val = readb(docg3->cascade->base + reg);
trace_docg3_io(0, 8, reg, (int)val);
return val;
}
static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
{
u16 val = readw(docg3->cascade->base + reg);
trace_docg3_io(0, 16, reg, (int)val);
return val;
}
static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
{
writeb(val, docg3->cascade->base + reg);
trace_docg3_io(1, 8, reg, val);
}
static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
{
writew(val, docg3->cascade->base + reg);
trace_docg3_io(1, 16, reg, val);
}
static inline void doc_flash_command(struct docg3 *docg3, u8 cmd)
{
doc_writeb(docg3, cmd, DOC_FLASHCOMMAND);
}
static inline void doc_flash_sequence(struct docg3 *docg3, u8 seq)
{
doc_writeb(docg3, seq, DOC_FLASHSEQUENCE);
}
static inline void doc_flash_address(struct docg3 *docg3, u8 addr)
{
doc_writeb(docg3, addr, DOC_FLASHADDRESS);
}
static char const * const part_probes[] = { "cmdlinepart", "saftlpart", NULL };
static int doc_register_readb(struct docg3 *docg3, int reg)
{
u8 val;
doc_writew(docg3, reg, DOC_READADDRESS);
val = doc_readb(docg3, reg);
doc_vdbg("Read register %04x : %02x\n", reg, val);
return val;
}
static int doc_register_readw(struct docg3 *docg3, int reg)
{
u16 val;
doc_writew(docg3, reg, DOC_READADDRESS);
val = doc_readw(docg3, reg);
doc_vdbg("Read register %04x : %04x\n", reg, val);
return val;
}
/**
* doc_delay - delay docg3 operations
* @docg3: the device
* @nbNOPs: the number of NOPs to issue
*
* As no specification is available, the right timings between chip commands are
 * unknown. The only available pieces of information are the NOPs observed on a
 * working docg3 chip.
 * Therefore, doc_delay relies on a busy loop of NOPs, instead of the
 * scheduler-friendlier msleep() or a blocking mdelay().
*/
static void doc_delay(struct docg3 *docg3, int nbNOPs)
{
int i;
doc_vdbg("NOP x %d\n", nbNOPs);
for (i = 0; i < nbNOPs; i++)
doc_writeb(docg3, 0, DOC_NOP);
}
static int is_prot_seq_error(struct docg3 *docg3)
{
int ctrl;
ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
return ctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR);
}
static int doc_is_ready(struct docg3 *docg3)
{
int ctrl;
ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
return ctrl & DOC_CTRL_FLASHREADY;
}
static int doc_wait_ready(struct docg3 *docg3)
{
int maxWaitCycles = 100;
do {
doc_delay(docg3, 4);
cpu_relax();
} while (!doc_is_ready(docg3) && maxWaitCycles--);
doc_delay(docg3, 2);
if (maxWaitCycles > 0)
return 0;
else
return -EIO;
}
static int doc_reset_seq(struct docg3 *docg3)
{
int ret;
doc_writeb(docg3, 0x10, DOC_FLASHCONTROL);
doc_flash_sequence(docg3, DOC_SEQ_RESET);
doc_flash_command(docg3, DOC_CMD_RESET);
doc_delay(docg3, 2);
ret = doc_wait_ready(docg3);
doc_dbg("doc_reset_seq() -> isReady=%s\n", ret ? "false" : "true");
return ret;
}
/**
* doc_read_data_area - Read data from data area
* @docg3: the device
 * @buf: the buffer to fill in (might be NULL for dummy reads)
* @len: the length to read
* @first: first time read, DOC_READADDRESS should be set
*
* Reads bytes from flash data. Handles the single byte / even bytes reads.
*/
static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
int first)
{
int i, cdr, len4;
u16 data16, *dst16;
u8 data8, *dst8;
doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len);
cdr = len & 0x1;
len4 = len - cdr;
if (first)
doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
dst16 = buf;
for (i = 0; i < len4; i += 2) {
data16 = doc_readw(docg3, DOC_IOSPACE_DATA);
if (dst16) {
*dst16 = data16;
dst16++;
}
}
if (cdr) {
doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
DOC_READADDRESS);
doc_delay(docg3, 1);
dst8 = (u8 *)dst16;
for (i = 0; i < cdr; i++) {
data8 = doc_readb(docg3, DOC_IOSPACE_DATA);
if (dst8) {
*dst8 = data8;
dst8++;
}
}
}
}
/**
* doc_write_data_area - Write data into data area
* @docg3: the device
* @buf: the buffer to get input bytes from
* @len: the length to write
*
* Writes bytes into flash data. Handles the single byte / even bytes writes.
*/
static void doc_write_data_area(struct docg3 *docg3, const void *buf, int len)
{
int i, cdr, len4;
u16 *src16;
u8 *src8;
doc_dbg("doc_write_data_area(buf=%p, len=%d)\n", buf, len);
cdr = len & 0x3;
len4 = len - cdr;
doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
src16 = (u16 *)buf;
for (i = 0; i < len4; i += 2) {
doc_writew(docg3, *src16, DOC_IOSPACE_DATA);
src16++;
}
src8 = (u8 *)src16;
for (i = 0; i < cdr; i++) {
doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
DOC_READADDRESS);
doc_writeb(docg3, *src8, DOC_IOSPACE_DATA);
src8++;
}
}
/**
* doc_set_reliable_mode - Sets the flash to normal or reliable data mode
* @docg3: the device
*
 * The reliable data mode is a bit slower than the fast mode, but fewer errors
 * occur. Entering the reliable mode cannot be done without entering the fast
 * mode first.
 *
 * In reliable mode, pages 2*n and 2*n+1 are clones. Writing to page 0 of
 * blocks (4,5) makes the hardware also write to page 1 of blocks (4,5).
 * Reading from page 0 of blocks (4,5) or from page 1 of blocks (4,5) gives the
 * same result, which is a logical AND between the bytes of page 0 and page 1
 * (consistent with the fact that writing to a page can only _clear_ bits of
 * that page).
*/
static void doc_set_reliable_mode(struct docg3 *docg3)
{
static char *strmode[] = { "normal", "fast", "reliable", "invalid" };
doc_dbg("doc_set_reliable_mode(%s)\n", strmode[docg3->reliable]);
switch (docg3->reliable) {
case 0:
break;
case 1:
doc_flash_sequence(docg3, DOC_SEQ_SET_FASTMODE);
doc_flash_command(docg3, DOC_CMD_FAST_MODE);
break;
case 2:
doc_flash_sequence(docg3, DOC_SEQ_SET_RELIABLEMODE);
doc_flash_command(docg3, DOC_CMD_FAST_MODE);
doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
break;
default:
doc_err("doc_set_reliable_mode(): invalid mode\n");
break;
}
doc_delay(docg3, 2);
}
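/*
 * Illustration (added for clarity, not from any datasheet): because
 * programming a NAND page can only clear bits, a read in reliable mode
 * returns the bitwise AND of the two cloned pages. A minimal sketch with
 * hypothetical byte values, kept out of the build on purpose:
 */
#if 0
static u8 docg3_reliable_readback_example(void)
{
	u8 page_even = 0xf3;	/* byte as stored in page 2*n */
	u8 page_odd = 0xd7;	/* byte as stored in page 2*n+1 */

	return page_even & page_odd;	/* 0xd3 is what a read would return */
}
#endif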
/**
* doc_set_asic_mode - Set the ASIC mode
* @docg3: the device
* @mode: the mode
*
 * The ASIC can work in 3 modes:
 * - RESET: all registers are zeroed
 * - NORMAL: receives and handles commands
 * - POWERDOWN: minimal power use, flash parts shut off
*/
static void doc_set_asic_mode(struct docg3 *docg3, u8 mode)
{
int i;
for (i = 0; i < 12; i++)
doc_readb(docg3, DOC_IOSPACE_IPL);
mode |= DOC_ASICMODE_MDWREN;
doc_dbg("doc_set_asic_mode(%02x)\n", mode);
doc_writeb(docg3, mode, DOC_ASICMODE);
doc_writeb(docg3, ~mode, DOC_ASICMODECONFIRM);
doc_delay(docg3, 1);
}
/**
 * doc_set_device_id - Sets the device id for cascaded G3 chips
* @docg3: the device
* @id: the chip to select (amongst 0, 1, 2, 3)
*
 * There can be 4 cascaded G3 chips. This function selects the one which
 * should be the active one.
*/
static void doc_set_device_id(struct docg3 *docg3, int id)
{
u8 ctrl;
doc_dbg("doc_set_device_id(%d)\n", id);
doc_writeb(docg3, id, DOC_DEVICESELECT);
ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
ctrl &= ~DOC_CTRL_VIOLATION;
ctrl |= DOC_CTRL_CE;
doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
}
/**
* doc_set_extra_page_mode - Change flash page layout
* @docg3: the device
*
* Normally, the flash page is split into the data (512 bytes) and the out of
* band data (16 bytes). For each, 4 more bytes can be accessed, where the wear
* leveling counters are stored. To access this last area of 4 bytes, a special
* mode must be input to the flash ASIC.
*
 * Returns 0 if no error occurred, -EIO otherwise.
*/
static int doc_set_extra_page_mode(struct docg3 *docg3)
{
int fctrl;
doc_dbg("doc_set_extra_page_mode()\n");
doc_flash_sequence(docg3, DOC_SEQ_PAGE_SIZE_532);
doc_flash_command(docg3, DOC_CMD_PAGE_SIZE_532);
doc_delay(docg3, 2);
fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
if (fctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR))
return -EIO;
else
return 0;
}
/**
* doc_setup_addr_sector - Setup blocks/page/ofs address for one plane
* @docg3: the device
* @sector: the sector
*/
static void doc_setup_addr_sector(struct docg3 *docg3, int sector)
{
doc_delay(docg3, 1);
doc_flash_address(docg3, sector & 0xff);
doc_flash_address(docg3, (sector >> 8) & 0xff);
doc_flash_address(docg3, (sector >> 16) & 0xff);
doc_delay(docg3, 1);
}
/**
* doc_setup_writeaddr_sector - Setup blocks/page/ofs address for one plane
* @docg3: the device
* @sector: the sector
* @ofs: the offset in the page, between 0 and (512 + 16 + 512)
*/
static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs)
{
ofs = ofs >> 2;
doc_delay(docg3, 1);
doc_flash_address(docg3, ofs & 0xff);
doc_flash_address(docg3, sector & 0xff);
doc_flash_address(docg3, (sector >> 8) & 0xff);
doc_flash_address(docg3, (sector >> 16) & 0xff);
doc_delay(docg3, 1);
}
/**
* doc_read_seek - Set both flash planes to the specified block, page for reading
* @docg3: the device
* @block0: the first plane block index
* @block1: the second plane block index
* @page: the page index within the block
* @wear: if true, read will occur on the 4 extra bytes of the wear area
* @ofs: offset in page to read
*
* Programs the flash even and odd planes to the specific block and page.
* Alternatively, programs the flash to the wear area of the specified page.
*/
static int doc_read_seek(struct docg3 *docg3, int block0, int block1, int page,
int wear, int ofs)
{
int sector, ret = 0;
doc_dbg("doc_seek(blocks=(%d,%d), page=%d, ofs=%d, wear=%d)\n",
block0, block1, page, ofs, wear);
if (!wear && (ofs < 2 * DOC_LAYOUT_PAGE_SIZE)) {
doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
doc_delay(docg3, 2);
} else {
doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
doc_delay(docg3, 2);
}
doc_set_reliable_mode(docg3);
if (wear)
ret = doc_set_extra_page_mode(docg3);
if (ret)
goto out;
doc_flash_sequence(docg3, DOC_SEQ_READ);
sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
doc_setup_addr_sector(docg3, sector);
sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
doc_setup_addr_sector(docg3, sector);
doc_delay(docg3, 1);
out:
return ret;
}
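/*
 * Note (illustrative only): the per-plane "sector" programmed above packs the
 * block index in the upper bits and the page index in the lower bits. For
 * example, assuming a 6-bit page field, block 8 / page 4 would give
 * (8 << 6) + 4 = 516.
 */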
/**
* doc_write_seek - Set both flash planes to the specified block, page for writing
* @docg3: the device
* @block0: the first plane block index
* @block1: the second plane block index
* @page: the page index within the block
* @ofs: offset in page to write
*
* Programs the flash even and odd planes to the specific block and page.
* Alternatively, programs the flash to the wear area of the specified page.
*/
static int doc_write_seek(struct docg3 *docg3, int block0, int block1, int page,
int ofs)
{
int ret = 0, sector;
doc_dbg("doc_write_seek(blocks=(%d,%d), page=%d, ofs=%d)\n",
block0, block1, page, ofs);
doc_set_reliable_mode(docg3);
if (ofs < 2 * DOC_LAYOUT_PAGE_SIZE) {
doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
doc_delay(docg3, 2);
} else {
doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
doc_delay(docg3, 2);
}
doc_flash_sequence(docg3, DOC_SEQ_PAGE_SETUP);
doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
doc_setup_writeaddr_sector(docg3, sector, ofs);
doc_flash_command(docg3, DOC_CMD_PROG_CYCLE3);
doc_delay(docg3, 2);
ret = doc_wait_ready(docg3);
if (ret)
goto out;
doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
doc_setup_writeaddr_sector(docg3, sector, ofs);
doc_delay(docg3, 1);
out:
return ret;
}
/**
* doc_read_page_ecc_init - Initialize hardware ECC engine
* @docg3: the device
* @len: the number of bytes covered by the ECC (BCH covered)
*
 * The function initializes the hardware ECC engine to compute the Hamming
 * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
 *
 * Returns 0 on success, -EIO on error
*/
static int doc_read_page_ecc_init(struct docg3 *docg3, int len)
{
doc_writew(docg3, DOC_ECCCONF0_READ_MODE
| DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
| (len & DOC_ECCCONF0_DATA_BYTES_MASK),
DOC_ECCCONF0);
doc_delay(docg3, 4);
doc_register_readb(docg3, DOC_FLASHCONTROL);
return doc_wait_ready(docg3);
}
/**
* doc_write_page_ecc_init - Initialize hardware BCH ECC engine
* @docg3: the device
* @len: the number of bytes covered by the ECC (BCH covered)
*
 * The function initializes the hardware ECC engine to compute the Hamming
 * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
 *
 * Returns 0 on success, -EIO on error
*/
static int doc_write_page_ecc_init(struct docg3 *docg3, int len)
{
doc_writew(docg3, DOC_ECCCONF0_WRITE_MODE
| DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
| (len & DOC_ECCCONF0_DATA_BYTES_MASK),
DOC_ECCCONF0);
doc_delay(docg3, 4);
doc_register_readb(docg3, DOC_FLASHCONTROL);
return doc_wait_ready(docg3);
}
/**
* doc_ecc_disable - Disable Hamming and BCH ECC hardware calculator
* @docg3: the device
*
 * Disables the hardware ECC generator and checker, for unchecked reads (such
 * as when reading only the OOB or the write status byte).
*/
static void doc_ecc_disable(struct docg3 *docg3)
{
doc_writew(docg3, DOC_ECCCONF0_READ_MODE, DOC_ECCCONF0);
doc_delay(docg3, 4);
}
/**
* doc_hamming_ecc_init - Initialize hardware Hamming ECC engine
* @docg3: the device
* @nb_bytes: the number of bytes covered by the ECC (Hamming covered)
*
 * This function programs the ECC hardware to compute the Hamming code on the
 * last N bytes provided to the hardware generator.
*/
static void doc_hamming_ecc_init(struct docg3 *docg3, int nb_bytes)
{
u8 ecc_conf1;
ecc_conf1 = doc_register_readb(docg3, DOC_ECCCONF1);
ecc_conf1 &= ~DOC_ECCCONF1_HAMMING_BITS_MASK;
ecc_conf1 |= (nb_bytes & DOC_ECCCONF1_HAMMING_BITS_MASK);
doc_writeb(docg3, ecc_conf1, DOC_ECCCONF1);
}
/**
 * doc_ecc_bch_fix_data - Fix the read data from flash, if need be
* @docg3: the device
* @buf: the buffer of read data (512 + 7 + 1 bytes)
* @hwecc: the hardware calculated ECC.
* It's in fact recv_ecc ^ calc_ecc, where recv_ecc was read from OOB
* area data, and calc_ecc the ECC calculated by the hardware generator.
*
 * Checks if the received data matches the ECC, and if an error is detected,
 * tries to fix the bit flips (at most 4) in the buffer buf. As the docg3
 * understands the (data, ecc, syndromes) in an inverted order in comparison to
 * the BCH library, the function reverses the order of bits (i.e. bit7 and
 * bit0, bit6 and bit1, ...) for all ECC data.
*
* The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch
* algorithm is used to decode this. However the hw operates on page
* data in a bit order that is the reverse of that of the bch alg,
* requiring that the bits be reversed on the result. Thanks to Ivan
* Djelic for his analysis.
*
* Returns number of fixed bits (0, 1, 2, 3, 4) or -EBADMSG if too many bit
* errors were detected and cannot be fixed.
*/
static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc)
{
u8 ecc[DOC_ECC_BCH_SIZE];
int errorpos[DOC_ECC_BCH_T], i, numerrs;
for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
ecc[i] = bitrev8(hwecc[i]);
numerrs = bch_decode(docg3->cascade->bch, NULL,
DOC_ECC_BCH_COVERED_BYTES,
NULL, ecc, NULL, errorpos);
BUG_ON(numerrs == -EINVAL);
if (numerrs < 0)
goto out;
for (i = 0; i < numerrs; i++)
errorpos[i] = (errorpos[i] & ~7) | (7 - (errorpos[i] & 7));
for (i = 0; i < numerrs; i++)
if (errorpos[i] < DOC_ECC_BCH_COVERED_BYTES*8)
/* error is located in data, correct it */
change_bit(errorpos[i], buf);
out:
doc_dbg("doc_ecc_bch_fix_data: flipped %d bits\n", numerrs);
return numerrs;
}
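/*
 * Worked example of the bit-order reversal above (illustrative only): an error
 * reported by the BCH library at bit position 10 (byte 1, bit 2) is remapped
 * to (10 & ~7) | (7 - (10 & 7)) = 8 | 5 = 13, i.e. byte 1, bit 5, which is the
 * bit change_bit() actually flips in buf.
 */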
/**
* doc_read_page_prepare - Prepares reading data from a flash page
* @docg3: the device
* @block0: the first plane block index on flash memory
* @block1: the second plane block index on flash memory
* @page: the page index in the block
* @offset: the offset in the page (must be a multiple of 4)
*
 * Prepares the page to be read in the flash memory:
* - tell ASIC to map the flash pages
* - tell ASIC to be in read mode
*
* After a call to this method, a call to doc_read_page_finish is mandatory,
* to end the read cycle of the flash.
*
 * Read data from a flash page. The length to be read must be between 0 and
 * (page_size + oob_size + wear_size), i.e. 532, and a multiple of 4 (because
 * reading the extra bytes is not implemented).
*
* As pages are grouped by 2 (in 2 planes), reading from a page must be done
* in two steps:
* - one read of 512 bytes at offset 0
* - one read of 512 bytes at offset 512 + 16
*
* Returns 0 if successful, -EIO if a read error occurred.
*/
static int doc_read_page_prepare(struct docg3 *docg3, int block0, int block1,
int page, int offset)
{
int wear_area = 0, ret = 0;
doc_dbg("doc_read_page_prepare(blocks=(%d,%d), page=%d, ofsInPage=%d)\n",
block0, block1, page, offset);
if (offset >= DOC_LAYOUT_WEAR_OFFSET)
wear_area = 1;
if (!wear_area && offset > (DOC_LAYOUT_PAGE_OOB_SIZE * 2))
return -EINVAL;
doc_set_device_id(docg3, docg3->device_id);
ret = doc_reset_seq(docg3);
if (ret)
goto err;
/* Program the flash address block and page */
ret = doc_read_seek(docg3, block0, block1, page, wear_area, offset);
if (ret)
goto err;
doc_flash_command(docg3, DOC_CMD_READ_ALL_PLANES);
doc_delay(docg3, 2);
doc_wait_ready(docg3);
doc_flash_command(docg3, DOC_CMD_SET_ADDR_READ);
doc_delay(docg3, 1);
if (offset >= DOC_LAYOUT_PAGE_SIZE * 2)
offset -= 2 * DOC_LAYOUT_PAGE_SIZE;
doc_flash_address(docg3, offset >> 2);
doc_delay(docg3, 1);
doc_wait_ready(docg3);
doc_flash_command(docg3, DOC_CMD_READ_FLASH);
return 0;
err:
doc_writeb(docg3, 0, DOC_DATAEND);
doc_delay(docg3, 2);
return -EIO;
}
/**
* doc_read_page_getbytes - Reads bytes from a prepared page
* @docg3: the device
* @len: the number of bytes to be read (must be a multiple of 4)
 * @buf: the buffer to be filled in (or NULL to discard the bytes)
* @first: 1 if first time read, DOC_READADDRESS should be set
* @last_odd: 1 if last read ended up on an odd byte
*
 * Reads bytes from a prepared page. There is a subtlety here: if the last read
 * ended up on an odd offset in the 1024-byte double page, i.e. between the 2
 * planes, the first byte must be read separately. If a word (16-bit) read were
 * used, the read would return the plane 2 byte in both the low and the high
 * byte, which would corrupt the read.
*
*/
static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
int first, int last_odd)
{
if (last_odd && len > 0) {
doc_read_data_area(docg3, buf, 1, first);
doc_read_data_area(docg3, buf ? buf + 1 : buf, len - 1, 0);
} else {
doc_read_data_area(docg3, buf, len, first);
}
doc_delay(docg3, 2);
return len;
}
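/*
 * Example (illustrative only): if a previous call consumed 17 bytes, the next
 * call starts on an odd offset (last_odd = 1), so the first byte is fetched
 * with a single 8-bit access and the remaining len - 1 bytes with the usual
 * 16-bit accesses, avoiding the duplicated plane byte described above.
 */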
/**
* doc_write_page_putbytes - Writes bytes into a prepared page
* @docg3: the device
* @len: the number of bytes to be written
* @buf: the buffer of input bytes
*
*/
static void doc_write_page_putbytes(struct docg3 *docg3, int len,
const u_char *buf)
{
doc_write_data_area(docg3, buf, len);
doc_delay(docg3, 2);
}
/**
* doc_get_bch_hw_ecc - Get hardware calculated BCH ECC
* @docg3: the device
 * @hwecc: the array of 7 bytes where the hardware ECC will be stored
*/
static void doc_get_bch_hw_ecc(struct docg3 *docg3, u8 *hwecc)
{
int i;
for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
hwecc[i] = doc_register_readb(docg3, DOC_BCH_HW_ECC(i));
}
/**
* doc_page_finish - Ends reading/writing of a flash page
* @docg3: the device
*/
static void doc_page_finish(struct docg3 *docg3)
{
doc_writeb(docg3, 0, DOC_DATAEND);
doc_delay(docg3, 2);
}
/**
* doc_read_page_finish - Ends reading of a flash page
* @docg3: the device
*
 * As a side effect, resets the chip selector to 0. This ensures that after
 * each read operation, floor 0 is selected. Therefore, if the system halts,
 * the reboot will boot on floor 0, where the IPL is.
*/
static void doc_read_page_finish(struct docg3 *docg3)
{
doc_page_finish(docg3);
doc_set_device_id(docg3, 0);
}
/**
* calc_block_sector - Calculate blocks, pages and ofs.
*
* @from: offset in flash
* @block0: first plane block index calculated
* @block1: second plane block index calculated
* @page: page calculated
* @ofs: offset in page
* @reliable: 0 if docg3 in normal mode, 1 if docg3 in fast mode, 2 if docg3 in
* reliable mode.
*
* The calculation is based on the reliable/normal mode. In normal mode, the 64
* pages of a block are available. In reliable mode, as pages 2*n and 2*n+1 are
* clones, only 32 pages per block are available.
*/
static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
int *ofs, int reliable)
{
uint sector, pages_biblock;
pages_biblock = DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES;
if (reliable == 1 || reliable == 2)
pages_biblock /= 2;
sector = from / DOC_LAYOUT_PAGE_SIZE;
*block0 = sector / pages_biblock * DOC_LAYOUT_NBPLANES;
*block1 = *block0 + 1;
*page = sector % pages_biblock;
*page /= DOC_LAYOUT_NBPLANES;
if (reliable == 1 || reliable == 2)
*page *= 2;
if (sector % 2)
*ofs = DOC_LAYOUT_PAGE_OOB_SIZE;
else
*ofs = 0;
}
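/*
 * Worked example (illustrative only, assuming the usual layout constants of
 * 512-byte pages, 64 pages per block and 2 planes, i.e. 128 pages per block
 * pair in normal mode):
 *   from = 266752  =>  sector = 266752 / 512 = 521
 *   block0 = 521 / 128 * 2 = 8, block1 = 9
 *   page   = (521 % 128) / 2 = 4
 *   ofs    = DOC_LAYOUT_PAGE_OOB_SIZE (sector is odd, second plane)
 * In reliable mode only half the pages are usable and the page index is
 * doubled, as done above.
 */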
/**
* doc_read_oob - Read out of band bytes from flash
* @mtd: the device
* @from: the offset from first block and first page, in bytes, aligned on page
* size
* @ops: the mtd oob structure
*
* Reads flash memory OOB area of pages.
*
 * Returns 0 if read successful, or -EIO, -EINVAL if an error occurred
*/
static int doc_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct docg3 *docg3 = mtd->priv;
int block0, block1, page, ret, skip, ofs = 0;
u8 *oobbuf = ops->oobbuf;
u8 *buf = ops->datbuf;
size_t len, ooblen, nbdata, nboob;
u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
struct mtd_ecc_stats old_stats;
int max_bitflips = 0;
if (buf)
len = ops->len;
else
len = 0;
if (oobbuf)
ooblen = ops->ooblen;
else
ooblen = 0;
if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
oobbuf += ops->ooboffs;
doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
from, ops->mode, buf, len, oobbuf, ooblen);
if (ooblen % DOC_LAYOUT_OOB_SIZE)
return -EINVAL;
ops->oobretlen = 0;
ops->retlen = 0;
ret = 0;
skip = from % DOC_LAYOUT_PAGE_SIZE;
mutex_lock(&docg3->cascade->lock);
old_stats = mtd->ecc_stats;
while (ret >= 0 && (len > 0 || ooblen > 0)) {
calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
docg3->reliable);
nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip);
nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE);
ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
if (ret < 0)
goto out;
ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
if (ret < 0)
goto err_in_read;
ret = doc_read_page_getbytes(docg3, skip, NULL, 1, 0);
if (ret < skip)
goto err_in_read;
ret = doc_read_page_getbytes(docg3, nbdata, buf, 0, skip % 2);
if (ret < nbdata)
goto err_in_read;
doc_read_page_getbytes(docg3,
DOC_LAYOUT_PAGE_SIZE - nbdata - skip,
NULL, 0, (skip + nbdata) % 2);
ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0, 0);
if (ret < nboob)
goto err_in_read;
doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob,
NULL, 0, nboob % 2);
doc_get_bch_hw_ecc(docg3, hwecc);
eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
if (nboob >= DOC_LAYOUT_OOB_SIZE) {
doc_dbg("OOB - INFO: %*phC\n", 7, oobbuf);
doc_dbg("OOB - HAMMING: %02x\n", oobbuf[7]);
doc_dbg("OOB - BCH_ECC: %*phC\n", 7, oobbuf + 8);
doc_dbg("OOB - UNUSED: %02x\n", oobbuf[15]);
}
doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1);
doc_dbg("ECC HW_ECC: %*phC\n", 7, hwecc);
ret = -EIO;
if (is_prot_seq_error(docg3))
goto err_in_read;
ret = 0;
if ((block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) &&
(eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR) &&
(eccconf1 & DOC_ECCCONF1_PAGE_IS_WRITTEN) &&
(ops->mode != MTD_OPS_RAW) &&
(nbdata == DOC_LAYOUT_PAGE_SIZE)) {
ret = doc_ecc_bch_fix_data(docg3, buf, hwecc);
if (ret < 0) {
mtd->ecc_stats.failed++;
ret = -EBADMSG;
}
if (ret > 0) {
mtd->ecc_stats.corrected += ret;
max_bitflips = max(max_bitflips, ret);
ret = max_bitflips;
}
}
doc_read_page_finish(docg3);
ops->retlen += nbdata;
ops->oobretlen += nboob;
buf += nbdata;
oobbuf += nboob;
len -= nbdata;
ooblen -= nboob;
from += DOC_LAYOUT_PAGE_SIZE;
skip = 0;
}
out:
if (ops->stats) {
ops->stats->uncorrectable_errors +=
mtd->ecc_stats.failed - old_stats.failed;
ops->stats->corrected_bitflips +=
mtd->ecc_stats.corrected - old_stats.corrected;
}
mutex_unlock(&docg3->cascade->lock);
return ret;
err_in_read:
doc_read_page_finish(docg3);
goto out;
}
static int doc_reload_bbt(struct docg3 *docg3)
{
int block = DOC_LAYOUT_BLOCK_BBT;
int ret = 0, nbpages, page;
u_char *buf = docg3->bbt;
nbpages = DIV_ROUND_UP(docg3->max_block + 1, 8 * DOC_LAYOUT_PAGE_SIZE);
for (page = 0; !ret && (page < nbpages); page++) {
ret = doc_read_page_prepare(docg3, block, block + 1,
page + DOC_LAYOUT_PAGE_BBT, 0);
if (!ret)
ret = doc_read_page_ecc_init(docg3,
DOC_LAYOUT_PAGE_SIZE);
if (!ret)
doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE,
buf, 1, 0);
buf += DOC_LAYOUT_PAGE_SIZE;
}
doc_read_page_finish(docg3);
return ret;
}
/**
* doc_block_isbad - Checks whether a block is good or not
* @mtd: the device
* @from: the offset to find the correct block
*
 * Returns 1 if block is bad, 0 if block is good, -EINVAL if the block is out
 * of range
*/
static int doc_block_isbad(struct mtd_info *mtd, loff_t from)
{
struct docg3 *docg3 = mtd->priv;
int block0, block1, page, ofs, is_good;
calc_block_sector(from, &block0, &block1, &page, &ofs,
docg3->reliable);
doc_dbg("doc_block_isbad(from=%lld) => block=(%d,%d), page=%d, ofs=%d\n",
from, block0, block1, page, ofs);
if (block0 < DOC_LAYOUT_BLOCK_FIRST_DATA)
return 0;
if (block1 > docg3->max_block)
return -EINVAL;
is_good = docg3->bbt[block0 >> 3] & (1 << (block0 & 0x7));
return !is_good;
}
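/*
 * Example of the BBT lookup above (illustrative only): for block0 = 20, the
 * good/bad bit lives in bbt[20 >> 3] = bbt[2], bit 20 & 0x7 = 4; a set bit
 * means "good", hence the negation in the returned value.
 */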
#if 0
/**
* doc_get_erase_count - Get block erase count
* @docg3: the device
* @from: the offset in which the block is.
*
 * Get the number of times a block was erased. The number is the maximum of the
 * erase counts of the first and second plane (which should normally be equal).
 *
 * Returns the number of erases, or -EINVAL or -EIO on error.
*/
static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
{
u8 buf[DOC_LAYOUT_WEAR_SIZE];
int ret, plane1_erase_count, plane2_erase_count;
int block0, block1, page, ofs;
doc_dbg("doc_get_erase_count(from=%lld, buf=%p)\n", from, buf);
if (from % DOC_LAYOUT_PAGE_SIZE)
return -EINVAL;
calc_block_sector(from, &block0, &block1, &page, &ofs, docg3->reliable);
if (block1 > docg3->max_block)
return -EINVAL;
ret = doc_reset_seq(docg3);
if (!ret)
ret = doc_read_page_prepare(docg3, block0, block1, page,
ofs + DOC_LAYOUT_WEAR_OFFSET, 0);
if (!ret)
ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE,
buf, 1, 0);
doc_read_page_finish(docg3);
if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK))
return -EIO;
plane1_erase_count = (u8)(~buf[1]) | ((u8)(~buf[4]) << 8)
| ((u8)(~buf[5]) << 16);
plane2_erase_count = (u8)(~buf[3]) | ((u8)(~buf[6]) << 8)
| ((u8)(~buf[7]) << 16);
return max(plane1_erase_count, plane2_erase_count);
}
#endif
/**
* doc_get_op_status - get erase/write operation status
* @docg3: the device
*
* Queries the status from the chip, and returns it
*
* Returns the status (bits DOC_PLANES_STATUS_*)
*/
static int doc_get_op_status(struct docg3 *docg3)
{
u8 status;
doc_flash_sequence(docg3, DOC_SEQ_PLANES_STATUS);
doc_flash_command(docg3, DOC_CMD_PLANES_STATUS);
doc_delay(docg3, 5);
doc_ecc_disable(docg3);
doc_read_data_area(docg3, &status, 1, 1);
return status;
}
/**
* doc_write_erase_wait_status - wait for write or erase completion
* @docg3: the device
*
* Wait for the chip to be ready again after erase or write operation, and check
* erase/write status.
*
 * Returns 0 if the erase/write was successful, -EIO if the erase/write failed
 * on a plane, -EAGAIN if the chip did not become ready before the timeout
*/
static int doc_write_erase_wait_status(struct docg3 *docg3)
{
int i, status, ret = 0;
for (i = 0; !doc_is_ready(docg3) && i < 5; i++)
msleep(20);
if (!doc_is_ready(docg3)) {
doc_dbg("Timeout reached and the chip is still not ready\n");
ret = -EAGAIN;
goto out;
}
status = doc_get_op_status(docg3);
if (status & DOC_PLANES_STATUS_FAIL) {
doc_dbg("Erase/Write failed on (a) plane(s), status = %x\n",
status);
ret = -EIO;
}
out:
doc_page_finish(docg3);
return ret;
}
/**
* doc_erase_block - Erase a couple of blocks
* @docg3: the device
* @block0: the first block to erase (leftmost plane)
* @block1: the second block to erase (rightmost plane)
*
* Erase both blocks, and return operation status
*
 * Returns 0 if erase successful, -EIO on erase failure, -EAGAIN if the chip
 * stayed not ready for too long
*/
static int doc_erase_block(struct docg3 *docg3, int block0, int block1)
{
int ret, sector;
doc_dbg("doc_erase_block(blocks=(%d,%d))\n", block0, block1);
ret = doc_reset_seq(docg3);
if (ret)
return -EIO;
doc_set_reliable_mode(docg3);
doc_flash_sequence(docg3, DOC_SEQ_ERASE);
sector = block0 << DOC_ADDR_BLOCK_SHIFT;
doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
doc_setup_addr_sector(docg3, sector);
sector = block1 << DOC_ADDR_BLOCK_SHIFT;
doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
doc_setup_addr_sector(docg3, sector);
doc_delay(docg3, 1);
doc_flash_command(docg3, DOC_CMD_ERASECYCLE2);
doc_delay(docg3, 2);
if (is_prot_seq_error(docg3)) {
doc_err("Erase blocks %d,%d error\n", block0, block1);
return -EIO;
}
return doc_write_erase_wait_status(docg3);
}
/**
* doc_erase - Erase a portion of the chip
* @mtd: the device
* @info: the erase info
*
 * Erase a bunch of contiguous blocks, by pairs, as an "mtd" page of 1024 bytes
 * is split into 2 pages of 512 bytes on 2 contiguous blocks.
*
* Returns 0 if erase successful, -EINVAL if addressing error, -EIO if erase
* issue
*/
static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
{
struct docg3 *docg3 = mtd->priv;
uint64_t len;
int block0, block1, page, ret = 0, ofs = 0;
doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len);
calc_block_sector(info->addr + info->len, &block0, &block1, &page,
&ofs, docg3->reliable);
if (info->addr + info->len > mtd->size || page || ofs)
return -EINVAL;
calc_block_sector(info->addr, &block0, &block1, &page, &ofs,
docg3->reliable);
mutex_lock(&docg3->cascade->lock);
doc_set_device_id(docg3, docg3->device_id);
doc_set_reliable_mode(docg3);
for (len = info->len; !ret && len > 0; len -= mtd->erasesize) {
ret = doc_erase_block(docg3, block0, block1);
block0 += 2;
block1 += 2;
}
mutex_unlock(&docg3->cascade->lock);
return ret;
}
/**
* doc_write_page - Write a single page to the chip
* @docg3: the device
* @to: the offset from first block and first page, in bytes, aligned on page
* size
* @buf: buffer to get bytes from
* @oob: buffer to get out of band bytes from (can be NULL if no OOB should be
* written)
 * @autoecc: if 0, all 16 bytes from OOB are taken, regardless of HW Hamming or
 *           BCH computations. If 1, only bytes 0-7 and byte 15 are taken, the
 *           remaining ones are filled with hardware Hamming and BCH
 *           computations. Its value is not meaningful if oob == NULL.
*
* Write one full page (ie. 1 page split on two planes), of 512 bytes, with the
* OOB data. The OOB ECC is automatically computed by the hardware Hamming and
* BCH generator if autoecc is not null.
*
* Returns 0 if write successful, -EIO if write error, -EAGAIN if timeout
*/
static int doc_write_page(struct docg3 *docg3, loff_t to, const u_char *buf,
const u_char *oob, int autoecc)
{
int block0, block1, page, ret, ofs = 0;
u8 hwecc[DOC_ECC_BCH_SIZE], hamming;
doc_dbg("doc_write_page(to=%lld)\n", to);
calc_block_sector(to, &block0, &block1, &page, &ofs, docg3->reliable);
doc_set_device_id(docg3, docg3->device_id);
ret = doc_reset_seq(docg3);
if (ret)
goto err;
/* Program the flash address block and page */
ret = doc_write_seek(docg3, block0, block1, page, ofs);
if (ret)
goto err;
doc_write_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
doc_delay(docg3, 2);
doc_write_page_putbytes(docg3, DOC_LAYOUT_PAGE_SIZE, buf);
if (oob && autoecc) {
doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ, oob);
doc_delay(docg3, 2);
oob += DOC_LAYOUT_OOB_UNUSED_OFS;
hamming = doc_register_readb(docg3, DOC_HAMMINGPARITY);
doc_delay(docg3, 2);
doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_HAMMING_SZ,
&hamming);
doc_delay(docg3, 2);
doc_get_bch_hw_ecc(docg3, hwecc);
doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_BCH_SZ, hwecc);
doc_delay(docg3, 2);
doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_UNUSED_SZ, oob);
}
if (oob && !autoecc)
doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_SIZE, oob);
doc_delay(docg3, 2);
doc_page_finish(docg3);
doc_delay(docg3, 2);
doc_flash_command(docg3, DOC_CMD_PROG_CYCLE2);
doc_delay(docg3, 2);
/*
* The wait status will perform another doc_page_finish() call, but that
* seems to please the docg3, so leave it.
*/
ret = doc_write_erase_wait_status(docg3);
return ret;
err:
doc_read_page_finish(docg3);
return ret;
}
/**
 * doc_guess_autoecc - Guess autoecc mode from mtd_oob_ops
* @ops: the oob operations
*
* Returns 0 or 1 if success, -EINVAL if invalid oob mode
*/
static int doc_guess_autoecc(struct mtd_oob_ops *ops)
{
int autoecc;
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
case MTD_OPS_AUTO_OOB:
autoecc = 1;
break;
case MTD_OPS_RAW:
autoecc = 0;
break;
default:
autoecc = -EINVAL;
}
return autoecc;
}
/**
 * doc_fill_autooob - Fill a 16-byte OOB from 8 non-ECC bytes
* @dst: the target 16 bytes OOB buffer
* @oobsrc: the source 8 bytes non-ECC OOB buffer
*
*/
static void doc_fill_autooob(u8 *dst, u8 *oobsrc)
{
memcpy(dst, oobsrc, DOC_LAYOUT_OOB_PAGEINFO_SZ);
dst[DOC_LAYOUT_OOB_UNUSED_OFS] = oobsrc[DOC_LAYOUT_OOB_PAGEINFO_SZ];
}
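/*
 * For reference (as suggested by the debug dump in doc_read_oob above), the
 * 16-byte OOB is laid out as: bytes 0-6 page info, byte 7 Hamming parity,
 * bytes 8-14 BCH ECC, byte 15 unused. doc_fill_autooob() only places the 8
 * caller-supplied bytes (7 of page info plus 1 in the unused slot); the
 * Hamming and BCH fields are computed by hardware on write when autoecc is
 * set.
 */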
/**
* doc_backup_oob - Backup OOB into docg3 structure
* @docg3: the device
* @to: the page offset in the chip
* @ops: the OOB size and buffer
*
 * As the docg3 should write a page with its OOB in one pass, and some userland
 * applications do write_oob() to set up the OOB and then write(), store the
 * OOB into a temporary storage. This is very dangerous, as 2 concurrent
 * applications could store an OOB, and then write their pages (which will
 * result in one having its OOB corrupted).
*
* The only reliable way would be for userland to call doc_write_oob() with both
* the page data _and_ the OOB area.
*
* Returns 0 if success, -EINVAL if ops content invalid
*/
static int doc_backup_oob(struct docg3 *docg3, loff_t to,
struct mtd_oob_ops *ops)
{
int ooblen = ops->ooblen, autoecc;
if (ooblen != DOC_LAYOUT_OOB_SIZE)
return -EINVAL;
autoecc = doc_guess_autoecc(ops);
if (autoecc < 0)
return autoecc;
docg3->oob_write_ofs = to;
docg3->oob_autoecc = autoecc;
if (ops->mode == MTD_OPS_AUTO_OOB) {
doc_fill_autooob(docg3->oob_write_buf, ops->oobbuf);
ops->oobretlen = 8;
} else {
memcpy(docg3->oob_write_buf, ops->oobbuf, DOC_LAYOUT_OOB_SIZE);
ops->oobretlen = DOC_LAYOUT_OOB_SIZE;
}
return 0;
}
/**
* doc_write_oob - Write out of band bytes to flash
* @mtd: the device
* @ofs: the offset from first block and first page, in bytes, aligned on page
* size
* @ops: the mtd oob structure
*
 * Either write OOB data into a temporary buffer, for the subsequent page
 * write. The provided OOB should be 16 bytes long. If a data buffer is
 * provided as well, issue the page write.
 * Or provide data without OOB, in which case an all-zeroed OOB will be used
 * (ECC will still be filled in if asked for).
 *
 * Returns 0 if successful, -EINVAL if the data or OOB lengths are invalid
*/
static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
struct mtd_oob_ops *ops)
{
struct docg3 *docg3 = mtd->priv;
int ret, autoecc, oobdelta;
u8 *oobbuf = ops->oobbuf;
u8 *buf = ops->datbuf;
size_t len, ooblen;
u8 oob[DOC_LAYOUT_OOB_SIZE];
if (buf)
len = ops->len;
else
len = 0;
if (oobbuf)
ooblen = ops->ooblen;
else
ooblen = 0;
if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
oobbuf += ops->ooboffs;
doc_dbg("doc_write_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
ofs, ops->mode, buf, len, oobbuf, ooblen);
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
case MTD_OPS_RAW:
oobdelta = mtd->oobsize;
break;
case MTD_OPS_AUTO_OOB:
oobdelta = mtd->oobavail;
break;
default:
return -EINVAL;
}
if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) ||
(ofs % DOC_LAYOUT_PAGE_SIZE))
return -EINVAL;
if (len && ooblen &&
(len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
return -EINVAL;
ops->oobretlen = 0;
ops->retlen = 0;
ret = 0;
if (len == 0 && ooblen == 0)
return -EINVAL;
if (len == 0 && ooblen > 0)
return doc_backup_oob(docg3, ofs, ops);
autoecc = doc_guess_autoecc(ops);
if (autoecc < 0)
return autoecc;
mutex_lock(&docg3->cascade->lock);
while (!ret && len > 0) {
memset(oob, 0, sizeof(oob));
if (ofs == docg3->oob_write_ofs)
memcpy(oob, docg3->oob_write_buf, DOC_LAYOUT_OOB_SIZE);
else if (ooblen > 0 && ops->mode == MTD_OPS_AUTO_OOB)
doc_fill_autooob(oob, oobbuf);
else if (ooblen > 0)
memcpy(oob, oobbuf, DOC_LAYOUT_OOB_SIZE);
ret = doc_write_page(docg3, ofs, buf, oob, autoecc);
ofs += DOC_LAYOUT_PAGE_SIZE;
len -= DOC_LAYOUT_PAGE_SIZE;
buf += DOC_LAYOUT_PAGE_SIZE;
if (ooblen) {
oobbuf += oobdelta;
ooblen -= oobdelta;
ops->oobretlen += oobdelta;
}
ops->retlen += DOC_LAYOUT_PAGE_SIZE;
}
doc_set_device_id(docg3, 0);
mutex_unlock(&docg3->cascade->lock);
return ret;
}
static struct docg3 *sysfs_dev2docg3(struct device *dev,
struct device_attribute *attr)
{
int floor;
struct mtd_info **docg3_floors = dev_get_drvdata(dev);
floor = attr->attr.name[1] - '0';
if (floor < 0 || floor >= DOC_MAX_NBFLOORS)
return NULL;
else
return docg3_floors[floor]->priv;
}
static ssize_t dps0_is_key_locked(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
int dps0;
mutex_lock(&docg3->cascade->lock);
doc_set_device_id(docg3, docg3->device_id);
dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
doc_set_device_id(docg3, 0);
mutex_unlock(&docg3->cascade->lock);
return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK));
}
static ssize_t dps1_is_key_locked(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
int dps1;
mutex_lock(&docg3->cascade->lock);
doc_set_device_id(docg3, docg3->device_id);
dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
doc_set_device_id(docg3, 0);
mutex_unlock(&docg3->cascade->lock);
return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK));
}
static ssize_t dps0_insert_key(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
int i;
if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
return -EINVAL;
mutex_lock(&docg3->cascade->lock);
doc_set_device_id(docg3, docg3->device_id);
for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
doc_writeb(docg3, buf[i], DOC_DPS0_KEY);
doc_set_device_id(docg3, 0);
mutex_unlock(&docg3->cascade->lock);
return count;
}
static ssize_t dps1_insert_key(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
int i;
if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
return -EINVAL;
mutex_lock(&docg3->cascade->lock);
doc_set_device_id(docg3, docg3->device_id);
for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
doc_writeb(docg3, buf[i], DOC_DPS1_KEY);
doc_set_device_id(docg3, 0);
mutex_unlock(&docg3->cascade->lock);
return count;
}
#define FLOOR_SYSFS(id) { \
__ATTR(f##id##_dps0_is_keylocked, S_IRUGO, dps0_is_key_locked, NULL), \
__ATTR(f##id##_dps1_is_keylocked, S_IRUGO, dps1_is_key_locked, NULL), \
__ATTR(f##id##_dps0_protection_key, S_IWUSR|S_IWGRP, NULL, dps0_insert_key), \
__ATTR(f##id##_dps1_protection_key, S_IWUSR|S_IWGRP, NULL, dps1_insert_key), \
}
static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = {
FLOOR_SYSFS(0), FLOOR_SYSFS(1), FLOOR_SYSFS(2), FLOOR_SYSFS(3)
};
static int doc_register_sysfs(struct platform_device *pdev,
struct docg3_cascade *cascade)
{
struct device *dev = &pdev->dev;
int floor;
int ret;
int i;
for (floor = 0;
floor < DOC_MAX_NBFLOORS && cascade->floors[floor];
floor++) {
for (i = 0; i < 4; i++) {
ret = device_create_file(dev, &doc_sys_attrs[floor][i]);
if (ret)
goto remove_files;
}
}
return 0;
remove_files:
do {
while (--i >= 0)
device_remove_file(dev, &doc_sys_attrs[floor][i]);
i = 4;
} while (--floor >= 0);
return ret;
}
static void doc_unregister_sysfs(struct platform_device *pdev,
struct docg3_cascade *cascade)
{
struct device *dev = &pdev->dev;
int floor, i;
for (floor = 0; floor < DOC_MAX_NBFLOORS && cascade->floors[floor];
floor++)
for (i = 0; i < 4; i++)
device_remove_file(dev, &doc_sys_attrs[floor][i]);
}
/*
* Debug sysfs entries
*/
static int flashcontrol_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = s->private;
u8 fctrl;
mutex_lock(&docg3->cascade->lock);
fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
mutex_unlock(&docg3->cascade->lock);
seq_printf(s, "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
fctrl,
fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-",
fctrl & DOC_CTRL_CE ? "active" : "inactive",
fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-",
fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-",
fctrl & DOC_CTRL_FLASHREADY ? "ready" : "not ready");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(flashcontrol);
static int asic_mode_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = s->private;
int pctrl, mode;
mutex_lock(&docg3->cascade->lock);
pctrl = doc_register_readb(docg3, DOC_ASICMODE);
mode = pctrl & 0x03;
mutex_unlock(&docg3->cascade->lock);
seq_printf(s,
"%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (",
pctrl,
pctrl & DOC_ASICMODE_RAM_WE ? 1 : 0,
pctrl & DOC_ASICMODE_RSTIN_RESET ? 1 : 0,
pctrl & DOC_ASICMODE_BDETCT_RESET ? 1 : 0,
pctrl & DOC_ASICMODE_MDWREN ? 1 : 0,
pctrl & DOC_ASICMODE_POWERDOWN ? 1 : 0,
mode >> 1, mode & 0x1);
switch (mode) {
case DOC_ASICMODE_RESET:
seq_puts(s, "reset");
break;
case DOC_ASICMODE_NORMAL:
seq_puts(s, "normal");
break;
case DOC_ASICMODE_POWERDOWN:
seq_puts(s, "powerdown");
break;
}
seq_puts(s, ")\n");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(asic_mode);
static int device_id_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = s->private;
int id;
mutex_lock(&docg3->cascade->lock);
id = doc_register_readb(docg3, DOC_DEVICESELECT);
mutex_unlock(&docg3->cascade->lock);
seq_printf(s, "DeviceId = %d\n", id);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(device_id);
static int protection_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = s->private;
int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
mutex_lock(&docg3->cascade->lock);
protect = doc_register_readb(docg3, DOC_PROTECTION);
dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW);
dps0_high = doc_register_readw(docg3, DOC_DPS0_ADDRHIGH);
dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW);
dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH);
mutex_unlock(&docg3->cascade->lock);
seq_printf(s, "Protection = 0x%02x (", protect);
if (protect & DOC_PROTECT_FOUNDRY_OTP_LOCK)
seq_puts(s, "FOUNDRY_OTP_LOCK,");
if (protect & DOC_PROTECT_CUSTOMER_OTP_LOCK)
seq_puts(s, "CUSTOMER_OTP_LOCK,");
if (protect & DOC_PROTECT_LOCK_INPUT)
seq_puts(s, "LOCK_INPUT,");
if (protect & DOC_PROTECT_STICKY_LOCK)
seq_puts(s, "STICKY_LOCK,");
if (protect & DOC_PROTECT_PROTECTION_ENABLED)
seq_puts(s, "PROTECTION ON,");
if (protect & DOC_PROTECT_IPL_DOWNLOAD_LOCK)
seq_puts(s, "IPL_DOWNLOAD_LOCK,");
if (protect & DOC_PROTECT_PROTECTION_ERROR)
seq_puts(s, "PROTECT_ERR,");
else
seq_puts(s, "NO_PROTECT_ERR");
seq_puts(s, ")\n");
seq_printf(s, "DPS0 = 0x%02x : Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
dps0, dps0_low, dps0_high,
!!(dps0 & DOC_DPS_OTP_PROTECTED),
!!(dps0 & DOC_DPS_READ_PROTECTED),
!!(dps0 & DOC_DPS_WRITE_PROTECTED),
!!(dps0 & DOC_DPS_HW_LOCK_ENABLED),
!!(dps0 & DOC_DPS_KEY_OK));
seq_printf(s, "DPS1 = 0x%02x : Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
dps1, dps1_low, dps1_high,
!!(dps1 & DOC_DPS_OTP_PROTECTED),
!!(dps1 & DOC_DPS_READ_PROTECTED),
!!(dps1 & DOC_DPS_WRITE_PROTECTED),
!!(dps1 & DOC_DPS_HW_LOCK_ENABLED),
!!(dps1 & DOC_DPS_KEY_OK));
return 0;
}
DEFINE_SHOW_ATTRIBUTE(protection);
static void __init doc_dbg_register(struct mtd_info *floor)
{
struct dentry *root = floor->dbg.dfs_dir;
struct docg3 *docg3 = floor->priv;
if (IS_ERR_OR_NULL(root)) {
if (IS_ENABLED(CONFIG_DEBUG_FS) &&
!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
dev_warn(floor->dev.parent,
"CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
return;
}
debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3,
&flashcontrol_fops);
debugfs_create_file("docg3_asic_mode", S_IRUSR, root, docg3,
&asic_mode_fops);
debugfs_create_file("docg3_device_id", S_IRUSR, root, docg3,
&device_id_fops);
debugfs_create_file("docg3_protection", S_IRUSR, root, docg3,
&protection_fops);
}
/**
* doc_set_driver_info - Fill the mtd_info structure and docg3 structure
* @chip_id: The chip ID of the supported chip
* @mtd: The structure to fill
*/
static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
{
struct docg3 *docg3 = mtd->priv;
int cfg;
cfg = doc_register_readb(docg3, DOC_CONFIGURATION);
docg3->if_cfg = (cfg & DOC_CONF_IF_CFG ? 1 : 0);
docg3->reliable = reliable_mode;
switch (chip_id) {
case DOC_CHIPID_G3:
mtd->name = devm_kasprintf(docg3->dev, GFP_KERNEL, "docg3.%d",
docg3->device_id);
if (!mtd->name)
return -ENOMEM;
docg3->max_block = 2047;
break;
}
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
mtd->size = (docg3->max_block + 1) * DOC_LAYOUT_BLOCK_SIZE;
if (docg3->reliable == 2)
mtd->size /= 2;
mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
if (docg3->reliable == 2)
mtd->erasesize /= 2;
mtd->writebufsize = mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
mtd->_erase = doc_erase;
mtd->_read_oob = doc_read_oob;
mtd->_write_oob = doc_write_oob;
mtd->_block_isbad = doc_block_isbad;
mtd_set_ooblayout(mtd, &nand_ooblayout_docg3_ops);
mtd->oobavail = 8;
mtd->ecc_strength = DOC_ECC_BCH_T;
return 0;
}
/**
* doc_probe_device - Check if a device is available
* @cascade: the cascade of chips this devices will belong to
* @floor: the floor of the probed device
* @dev: the device
*
 * Checks whether a device is available at the specified IO range and floor.
 *
 * Returns an mtd_info struct if a device is found, NULL if none was found, or
 * an ERR_PTR (e.g. -ENOMEM) if an error such as a memory allocation failure
 * occurred. If floor 0 is checked, a reset of the ASIC is launched.
*/
static struct mtd_info * __init
doc_probe_device(struct docg3_cascade *cascade, int floor, struct device *dev)
{
int ret, bbt_nbpages;
u16 chip_id, chip_id_inv;
struct docg3 *docg3;
struct mtd_info *mtd;
ret = -ENOMEM;
docg3 = kzalloc(sizeof(struct docg3), GFP_KERNEL);
if (!docg3)
goto nomem1;
mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
if (!mtd)
goto nomem2;
mtd->priv = docg3;
mtd->dev.parent = dev;
bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
8 * DOC_LAYOUT_PAGE_SIZE);
docg3->bbt = kcalloc(DOC_LAYOUT_PAGE_SIZE, bbt_nbpages, GFP_KERNEL);
if (!docg3->bbt)
goto nomem3;
docg3->dev = dev;
docg3->device_id = floor;
docg3->cascade = cascade;
doc_set_device_id(docg3, docg3->device_id);
if (!floor)
doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL);
chip_id = doc_register_readw(docg3, DOC_CHIPID);
chip_id_inv = doc_register_readw(docg3, DOC_CHIPID_INV);
ret = 0;
if (chip_id != (u16)(~chip_id_inv)) {
goto nomem4;
}
switch (chip_id) {
case DOC_CHIPID_G3:
doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n",
docg3->cascade->base, floor);
break;
default:
doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
goto nomem4;
}
ret = doc_set_driver_info(chip_id, mtd);
if (ret)
goto nomem4;
doc_hamming_ecc_init(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ);
doc_reload_bbt(docg3);
return mtd;
nomem4:
kfree(docg3->bbt);
nomem3:
kfree(mtd);
nomem2:
kfree(docg3);
nomem1:
return ret ? ERR_PTR(ret) : NULL;
}
/**
* doc_release_device - Release a docg3 floor
* @mtd: the device
*/
static void doc_release_device(struct mtd_info *mtd)
{
struct docg3 *docg3 = mtd->priv;
mtd_device_unregister(mtd);
kfree(docg3->bbt);
kfree(docg3);
kfree(mtd);
}
/**
* docg3_resume - Awakens docg3 floor
 * @pdev: platform device
*
* Returns 0 (always successful)
*/
static int docg3_resume(struct platform_device *pdev)
{
int i;
struct docg3_cascade *cascade;
struct mtd_info **docg3_floors, *mtd;
struct docg3 *docg3;
cascade = platform_get_drvdata(pdev);
docg3_floors = cascade->floors;
mtd = docg3_floors[0];
docg3 = mtd->priv;
doc_dbg("docg3_resume()\n");
for (i = 0; i < 12; i++)
doc_readb(docg3, DOC_IOSPACE_IPL);
return 0;
}
/**
 * docg3_suspend - Put the docg3 floor in low power mode
* @pdev: platform device
* @state: power state
*
 * Shuts off most of the docg3 circuitry to lower power consumption.
*
* Returns 0 if suspend succeeded, -EIO if chip refused suspend
*/
static int docg3_suspend(struct platform_device *pdev, pm_message_t state)
{
int floor, i;
struct docg3_cascade *cascade;
struct mtd_info **docg3_floors, *mtd;
struct docg3 *docg3;
u8 ctrl, pwr_down;
cascade = platform_get_drvdata(pdev);
docg3_floors = cascade->floors;
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
mtd = docg3_floors[floor];
if (!mtd)
continue;
docg3 = mtd->priv;
doc_writeb(docg3, floor, DOC_DEVICESELECT);
ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
ctrl &= ~DOC_CTRL_VIOLATION & ~DOC_CTRL_CE;
doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
for (i = 0; i < 10; i++) {
usleep_range(3000, 4000);
pwr_down = doc_register_readb(docg3, DOC_POWERMODE);
if (pwr_down & DOC_POWERDOWN_READY)
break;
}
if (pwr_down & DOC_POWERDOWN_READY) {
doc_dbg("docg3_suspend(): floor %d powerdown ok\n",
floor);
} else {
doc_err("docg3_suspend(): floor %d powerdown failed\n",
floor);
return -EIO;
}
}
mtd = docg3_floors[0];
docg3 = mtd->priv;
doc_set_asic_mode(docg3, DOC_ASICMODE_POWERDOWN);
return 0;
}
/**
* docg3_probe - Probe the IO space for a DiskOnChip G3 chip
* @pdev: platform device
*
 * Probes for a G3 chip at the specified IO space in the platform data
 * resources. Floor 0 must be available.
*
* Returns 0 on success, -ENOMEM, -ENXIO on error
*/
static int __init docg3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtd_info *mtd;
struct resource *ress;
void __iomem *base;
int ret, floor;
struct docg3_cascade *cascade;
ret = -ENXIO;
ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!ress) {
dev_err(dev, "No I/O memory resource defined\n");
return ret;
}
ret = -ENOMEM;
base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE);
if (!base) {
dev_err(dev, "devm_ioremap dev failed\n");
return ret;
}
cascade = devm_kcalloc(dev, DOC_MAX_NBFLOORS, sizeof(*cascade),
GFP_KERNEL);
if (!cascade)
return ret;
cascade->base = base;
mutex_init(&cascade->lock);
cascade->bch = bch_init(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
DOC_ECC_BCH_PRIMPOLY, false);
if (!cascade->bch)
return ret;
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
mtd = doc_probe_device(cascade, floor, dev);
if (IS_ERR(mtd)) {
ret = PTR_ERR(mtd);
goto err_probe;
}
if (!mtd) {
if (floor == 0)
goto notfound;
else
continue;
}
cascade->floors[floor] = mtd;
ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL,
0);
if (ret)
goto err_probe;
doc_dbg_register(cascade->floors[floor]);
}
ret = doc_register_sysfs(pdev, cascade);
if (ret)
goto err_probe;
platform_set_drvdata(pdev, cascade);
return 0;
notfound:
ret = -ENODEV;
dev_info(dev, "No supported DiskOnChip found\n");
err_probe:
bch_free(cascade->bch);
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
if (cascade->floors[floor])
doc_release_device(cascade->floors[floor]);
return ret;
}
/**
* docg3_release - Release the driver
* @pdev: the platform device
*
* Returns 0
*/
static int docg3_release(struct platform_device *pdev)
{
struct docg3_cascade *cascade = platform_get_drvdata(pdev);
struct docg3 *docg3 = cascade->floors[0]->priv;
int floor;
doc_unregister_sysfs(pdev, cascade);
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
if (cascade->floors[floor])
doc_release_device(cascade->floors[floor]);
bch_free(docg3->cascade->bch);
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id docg3_dt_ids[] = {
{ .compatible = "m-systems,diskonchip-g3" },
{}
};
MODULE_DEVICE_TABLE(of, docg3_dt_ids);
#endif
static struct platform_driver g3_driver = {
.driver = {
.name = "docg3",
.of_match_table = of_match_ptr(docg3_dt_ids),
},
.suspend = docg3_suspend,
.resume = docg3_resume,
.remove = docg3_release,
};
module_platform_driver_probe(g3_driver, docg3_probe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Jarzmik <[email protected]>");
MODULE_DESCRIPTION("MTD driver for DiskOnChip G3");
| linux-master | drivers/mtd/devices/docg3.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PMC551 PCI Mezzanine Ram Device
*
* Author:
* Mark Ferrell <[email protected]>
* Copyright 1999,2000 Nortel Networks
*
* Description:
* This driver is intended to support the PMC551 PCI Ram device
* from Ramix Inc. The PMC551 is a PMC Mezzanine module for
* cPCI embedded systems. The device contains a single SROM
* that initially programs the V370PDC chipset onboard the
* device, and various banks of DRAM/SDRAM onboard. This driver
* implements this PCI Ram device as an MTD (Memory Technology
* Device) so that it can be used to hold a file system, or for
* added swap space in embedded systems. Since the memory on
* this board isn't as fast as main memory we do not try to hook
* it into main memory as that would simply reduce performance
 * on the system. Using it as a block device allows us to use
 * it as high speed swap or for a high speed disk device of some
 * sort, which becomes very useful on diskless systems in the
 * embedded market, I might add.
*
* Notes:
* Due to what I assume is more buggy SROM, the 64M PMC551 I
* have available claims that all 4 of its DRAM banks have 64MiB
* of ram configured (making a grand total of 256MiB onboard).
* This is slightly annoying since the BAR0 size reflects the
* aperture size, not the dram size, and the V370PDC supplies no
* other method for memory size discovery. This problem is
* mostly only relevant when compiled as a module, as the
* unloading of the module with an aperture size smaller than
* the ram will cause the driver to detect the onboard memory
* size to be equal to the aperture size when the module is
* reloaded. Soooo, to help, the module supports an msize
* option to allow the specification of the onboard memory, and
* an asize option, to allow the specification of the aperture
 * size. The aperture must be equal to or less than the memory
* size, the driver will correct this if you screw it up. This
* problem is not relevant for compiled in drivers as compiled
* in drivers only init once.
*
* Credits:
* Saeed Karamooz <[email protected]> of Ramix INC. for the
* initial example code of how to initialize this device and for
* help with questions I had concerning operation of the device.
*
* Most of the MTD code for this driver was originally written
* for the slram.o module in the MTD drivers package which
* allows the mapping of system memory into an MTD device.
* Since the PMC551 memory module is accessed in the same
* fashion as system memory, the slram.c code became a very nice
 * fit to the needs of this driver. All we added was PCI
 * detection/initialization to the driver and automatic size
 * discovery via the PCI probing; later changes by Corey
 * Minyard set up the card to utilize a 1M sliding aperture.
*
* Corey Minyard <[email protected]>
* * Modified driver to utilize a sliding aperture instead of
* mapping all memory into kernel space which turned out to
* be very wasteful.
* * Located a bug in the SROM's initialization sequence that
* made the memory unusable, added a fix to code to touch up
* the DRAM some.
*
* Bugs/FIXMEs:
 * * MUST fix the init function to not spin on a register
 * waiting for it to be set .. this does not safely handle busted
 * devices that never reset the register correctly, which will
 * cause the system to hang, with a reboot being the only chance at
 * recovery. [sort of fixed, could be better]
* * Add I2C handling of the SROM so we can read the SROM's information
* about the aperture size. This should always accurately reflect the
* onboard memory size.
 * * Comb the init routine. It's still a bit kludgy on a few things.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <asm/io.h>
#include <linux/pci.h>
#include <linux/mtd/mtd.h>
#define PMC551_VERSION \
"Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n"
#define PCI_VENDOR_ID_V3_SEMI 0x11b0
#define PCI_DEVICE_ID_V3_SEMI_V370PDC 0x0200
#define PMC551_PCI_MEM_MAP0 0x50
#define PMC551_PCI_MEM_MAP1 0x54
#define PMC551_PCI_MEM_MAP_MAP_ADDR_MASK 0x3ff00000
#define PMC551_PCI_MEM_MAP_APERTURE_MASK 0x000000f0
#define PMC551_PCI_MEM_MAP_REG_EN 0x00000002
#define PMC551_PCI_MEM_MAP_ENABLE 0x00000001
#define PMC551_SDRAM_MA 0x60
#define PMC551_SDRAM_CMD 0x62
#define PMC551_DRAM_CFG 0x64
#define PMC551_SYS_CTRL_REG 0x78
#define PMC551_DRAM_BLK0 0x68
#define PMC551_DRAM_BLK1 0x6c
#define PMC551_DRAM_BLK2 0x70
#define PMC551_DRAM_BLK3 0x74
#define PMC551_DRAM_BLK_GET_SIZE(x) (524288 << ((x >> 4) & 0x0f))
#define PMC551_DRAM_BLK_SET_COL_MUX(x, v) (((x) & ~0x00007000) | (((v) & 0x7) << 12))
#define PMC551_DRAM_BLK_SET_ROW_MUX(x, v) (((x) & ~0x00000f00) | (((v) & 0xf) << 8))
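/*
 * Example (illustrative only): with bits [7:4] of a DRAM block register equal
 * to 0x7, PMC551_DRAM_BLK_GET_SIZE() yields 524288 << 7, i.e. a 64MiB bank.
 */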
struct mypriv {
struct pci_dev *dev;
u_char *start;
u32 base_map0;
u32 curr_map0;
u32 asize;
struct mtd_info *nextpmc551;
};
static struct mtd_info *pmc551list;
static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys);
static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct mypriv *priv = mtd->priv;
u32 soff_hi; /* start address offset hi */
u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
unsigned long end;
u_char *ptr;
size_t retlen;
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_erase(pos:%ld, len:%ld)\n", (long)instr->addr,
(long)instr->len);
#endif
end = instr->addr + instr->len - 1;
eoff_hi = end & ~(priv->asize - 1);
soff_hi = instr->addr & ~(priv->asize - 1);
eoff_lo = end & (priv->asize - 1);
pmc551_point(mtd, instr->addr, instr->len, &retlen,
(void **)&ptr, NULL);
if (soff_hi == eoff_hi || mtd->size == priv->asize) {
/* The whole thing fits within one access, so just one shot
will do it. */
memset(ptr, 0xff, instr->len);
} else {
/* We have to do multiple writes to get all the data
written. */
while (soff_hi != eoff_hi) {
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_erase() soff_hi: %ld, "
"eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
#endif
memset(ptr, 0xff, priv->asize);
if (soff_hi + priv->asize >= mtd->size) {
goto out;
}
soff_hi += priv->asize;
pmc551_point(mtd, (priv->base_map0 | soff_hi),
priv->asize, &retlen,
(void **)&ptr, NULL);
}
memset(ptr, 0xff, eoff_lo);
}
out:
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_erase() done\n");
#endif
return 0;
}
static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct mypriv *priv = mtd->priv;
u32 soff_hi;
u32 soff_lo;
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len);
#endif
soff_hi = from & ~(priv->asize - 1);
soff_lo = from & (priv->asize - 1);
/* Cheap hack optimization */
if (priv->curr_map0 != from) {
pci_write_config_dword(priv->dev, PMC551_PCI_MEM_MAP0,
(priv->base_map0 | soff_hi));
priv->curr_map0 = soff_hi;
}
*virt = priv->start + soff_lo;
*retlen = len;
return 0;
}
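/*
 * Worked example of the aperture arithmetic above (illustrative only,
 * assuming a 1MiB aperture, i.e. asize == 0x100000): for from == 0x00234567,
 * soff_hi == 0x00200000 is programmed into PMC551_PCI_MEM_MAP0 and the
 * returned pointer is priv->start + 0x00034567.
 */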
static int pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_unpoint()\n");
#endif
return 0;
}
static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t * retlen, u_char * buf)
{
struct mypriv *priv = mtd->priv;
u32 soff_hi; /* start address offset hi */
u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
unsigned long end;
u_char *ptr;
u_char *copyto = buf;
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_read(pos:%ld, len:%ld) asize: %ld\n",
(long)from, (long)len, (long)priv->asize);
#endif
end = from + len - 1;
soff_hi = from & ~(priv->asize - 1);
eoff_hi = end & ~(priv->asize - 1);
eoff_lo = end & (priv->asize - 1);
pmc551_point(mtd, from, len, retlen, (void **)&ptr, NULL);
if (soff_hi == eoff_hi) {
/* The whole thing fits within one access, so just one shot
will do it. */
memcpy(copyto, ptr, len);
copyto += len;
} else {
		/* We have to do multiple reads to get all the data
		   read. */
while (soff_hi != eoff_hi) {
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_read() soff_hi: %ld, "
"eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
#endif
memcpy(copyto, ptr, priv->asize);
copyto += priv->asize;
if (soff_hi + priv->asize >= mtd->size) {
goto out;
}
soff_hi += priv->asize;
pmc551_point(mtd, soff_hi, priv->asize, retlen,
(void **)&ptr, NULL);
}
memcpy(copyto, ptr, eoff_lo);
copyto += eoff_lo;
}
out:
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_read() done\n");
#endif
*retlen = copyto - buf;
return 0;
}
static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t * retlen, const u_char * buf)
{
struct mypriv *priv = mtd->priv;
u32 soff_hi; /* start address offset hi */
u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
unsigned long end;
u_char *ptr;
const u_char *copyfrom = buf;
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_write(pos:%ld, len:%ld) asize:%ld\n",
(long)to, (long)len, (long)priv->asize);
#endif
end = to + len - 1;
soff_hi = to & ~(priv->asize - 1);
eoff_hi = end & ~(priv->asize - 1);
eoff_lo = end & (priv->asize - 1);
pmc551_point(mtd, to, len, retlen, (void **)&ptr, NULL);
if (soff_hi == eoff_hi) {
/* The whole thing fits within one access, so just one shot
will do it. */
memcpy(ptr, copyfrom, len);
copyfrom += len;
} else {
/* We have to do multiple writes to get all the data
written. */
while (soff_hi != eoff_hi) {
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_write() soff_hi: %ld, "
"eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
#endif
memcpy(ptr, copyfrom, priv->asize);
copyfrom += priv->asize;
if (soff_hi >= mtd->size) {
goto out;
}
soff_hi += priv->asize;
pmc551_point(mtd, soff_hi, priv->asize, retlen,
(void **)&ptr, NULL);
}
memcpy(ptr, copyfrom, eoff_lo);
copyfrom += eoff_lo;
}
out:
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_write() done\n");
#endif
*retlen = copyfrom - buf;
return 0;
}
/*
* Fixup routines for the V370PDC
* PCI device ID 0x020011b0
*
* This function basically kick starts the DRAM onboard the card and gets it
* ready to be used. Before this is done the device reads VERY erratically, so
* much so that it can crash the Linux 2.2.x series kernels when a user cats
* /proc/pci .. though that is mainly a kernel bug in handling the PCI DEVSEL
* register.
*
* FIXME: stop spinning on registers; a timeout mechanism must be implemented.
*
* Returns the size of the memory region found.
*/
static int __init fixup_pmc551(struct pci_dev *dev)
{
#ifdef CONFIG_MTD_PMC551_BUGFIX
u32 dram_data;
#endif
u32 size, dcmd, cfg, dtmp;
u16 cmd, tmp, i;
u8 bcmd, counter;
/* Sanity Check */
if (!dev) {
return -ENODEV;
}
/*
* Attempt to reset the card
* FIXME: Stop Spinning registers
*/
counter = 0;
/* unlock registers */
pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, 0xA5);
/* read in old data */
pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd);
/* bang the reset line up and down for a few */
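/*
* Bit 7 of the system control register appears to drive the reset line;
* each burst of 100 config writes serves as a crude delay while the line
* is held low and then high.
*/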
for (i = 0; i < 10; i++) {
counter = 0;
bcmd &= ~0x80;
while (counter++ < 100) {
pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
}
counter = 0;
bcmd |= 0x80;
while (counter++ < 100) {
pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
}
}
bcmd |= (0x40 | 0x20);
pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
/*
* Take care and turn off the memory on the device while we
* tweak the configurations
*/
pci_read_config_word(dev, PCI_COMMAND, &cmd);
tmp = cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
pci_write_config_word(dev, PCI_COMMAND, tmp);
/*
* Disable existing aperture before probing memory size
*/
pci_read_config_dword(dev, PMC551_PCI_MEM_MAP0, &dcmd);
dtmp = (dcmd | PMC551_PCI_MEM_MAP_ENABLE | PMC551_PCI_MEM_MAP_REG_EN);
pci_write_config_dword(dev, PMC551_PCI_MEM_MAP0, dtmp);
/*
* Grab old BAR0 config so that we can figure out memory size
* This is another bit of kludge going on. The reason for the
* redundancy is I am hoping to retain the original configuration
* previously assigned to the card by the BIOS or some previous
* fixup routine in the kernel. So we read the old config into cfg,
* then write all 1's to the memory space, read back the result into
* "size", and then write back all the old config.
*/
pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &cfg);
#ifndef CONFIG_MTD_PMC551_BUGFIX
pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, ~0);
pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &size);
size = (size & PCI_BASE_ADDRESS_MEM_MASK);
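/* Isolate the lowest set bit: that is how many bytes the BAR decodes. */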
size &= ~(size - 1);
pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, cfg);
#else
/*
* Get the size of the memory by reading all the DRAM size values
* and adding them up.
*
* KLUDGE ALERT: the boards we are using have invalid column and
* row mux values. We fix them here, but this will break other
* memory configurations.
*/
pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dram_data);
size = PMC551_DRAM_BLK_GET_SIZE(dram_data);
dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
pci_write_config_dword(dev, PMC551_DRAM_BLK0, dram_data);
pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dram_data);
size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
pci_write_config_dword(dev, PMC551_DRAM_BLK1, dram_data);
pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dram_data);
size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
pci_write_config_dword(dev, PMC551_DRAM_BLK2, dram_data);
pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dram_data);
size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
pci_write_config_dword(dev, PMC551_DRAM_BLK3, dram_data);
/*
* Oops .. something went wrong
*/
if ((size &= PCI_BASE_ADDRESS_MEM_MASK) == 0) {
return -ENODEV;
}
#endif /* CONFIG_MTD_PMC551_BUGFIX */
if ((cfg & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY) {
return -ENODEV;
}
/*
* Precharge Dram
*/
pci_write_config_word(dev, PMC551_SDRAM_MA, 0x0400);
pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x00bf);
/*
* Wait until command has gone through
* FIXME: register spinning issue
*/
do {
pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd);
if (counter++ > 100)
break;
} while ((PCI_COMMAND_IO) & cmd);
/*
* Turn on auto refresh
* The loop is taken directly from Ramix's example code. I assume that
* this must be held high for some duration of time, but I can find no
* documentation referencing the reasons why.
*/
for (i = 1; i <= 8; i++) {
pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x0df);
/*
* Make certain command has gone through
* FIXME: register spinning issue
*/
counter = 0;
do {
pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd);
if (counter++ > 100)
break;
} while ((PCI_COMMAND_IO) & cmd);
}
pci_write_config_word(dev, PMC551_SDRAM_MA, 0x0020);
pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x0ff);
/*
* Wait until command completes
* FIXME: register spinning issue
*/
counter = 0;
do {
pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd);
if (counter++ > 100)
break;
} while ((PCI_COMMAND_IO) & cmd);
pci_read_config_dword(dev, PMC551_DRAM_CFG, &dcmd);
dcmd |= 0x02000000;
pci_write_config_dword(dev, PMC551_DRAM_CFG, dcmd);
/*
* Check to make certain fast back-to-back, if not
* then set it so
*/
pci_read_config_word(dev, PCI_STATUS, &cmd);
if ((cmd & PCI_COMMAND_FAST_BACK) == 0) {
cmd |= PCI_COMMAND_FAST_BACK;
pci_write_config_word(dev, PCI_STATUS, cmd);
}
/*
* Check to make certain the DEVSEL is set correctly, this device
* has a tendency to assert DEVSEL and TRDY when a write is performed
* to the memory when memory is read-only
*/
if ((cmd & PCI_STATUS_DEVSEL_MASK) != 0x0) {
cmd &= ~PCI_STATUS_DEVSEL_MASK;
pci_write_config_word(dev, PCI_STATUS, cmd);
}
/*
* Set to be prefetchable and put everything back based on old cfg.
* it's possible that the reset of the V370PDC nuked the original
* setup
*/
/*
cfg |= PCI_BASE_ADDRESS_MEM_PREFETCH;
pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, cfg );
*/
/*
* Turn PCI memory and I/O bus access back on
*/
pci_write_config_word(dev, PCI_COMMAND,
PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
#ifdef CONFIG_MTD_PMC551_DEBUG
/*
* Some screen fun
*/
printk(KERN_DEBUG "pmc551: %d%sB (0x%x) of %sprefetchable memory at "
"0x%llx\n", (size < 1024) ? size : (size < 1048576) ?
size >> 10 : size >> 20,
(size < 1024) ? "" : (size < 1048576) ? "Ki" : "Mi", size,
((dcmd & (0x1 << 3)) == 0) ? "non-" : "",
(unsigned long long)pci_resource_start(dev, 0));
/*
* Check to see the state of the memory
*/
pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dcmd);
printk(KERN_DEBUG "pmc551: DRAM_BLK0 Flags: %s,%s\n"
"pmc551: DRAM_BLK0 Size: %d at %d\n"
"pmc551: DRAM_BLK0 Row MUX: %d, Col MUX: %d\n",
(((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
(((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
PMC551_DRAM_BLK_GET_SIZE(dcmd),
((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
((dcmd >> 9) & 0xF));
pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dcmd);
printk(KERN_DEBUG "pmc551: DRAM_BLK1 Flags: %s,%s\n"
"pmc551: DRAM_BLK1 Size: %d at %d\n"
"pmc551: DRAM_BLK1 Row MUX: %d, Col MUX: %d\n",
(((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
(((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
PMC551_DRAM_BLK_GET_SIZE(dcmd),
((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
((dcmd >> 9) & 0xF));
pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dcmd);
printk(KERN_DEBUG "pmc551: DRAM_BLK2 Flags: %s,%s\n"
"pmc551: DRAM_BLK2 Size: %d at %d\n"
"pmc551: DRAM_BLK2 Row MUX: %d, Col MUX: %d\n",
(((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
(((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
PMC551_DRAM_BLK_GET_SIZE(dcmd),
((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
((dcmd >> 9) & 0xF));
pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dcmd);
printk(KERN_DEBUG "pmc551: DRAM_BLK3 Flags: %s,%s\n"
"pmc551: DRAM_BLK3 Size: %d at %d\n"
"pmc551: DRAM_BLK3 Row MUX: %d, Col MUX: %d\n",
(((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
(((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
PMC551_DRAM_BLK_GET_SIZE(dcmd),
((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
((dcmd >> 9) & 0xF));
pci_read_config_word(dev, PCI_COMMAND, &cmd);
printk(KERN_DEBUG "pmc551: Memory Access %s\n",
(((0x1 << 1) & cmd) == 0) ? "off" : "on");
printk(KERN_DEBUG "pmc551: I/O Access %s\n",
(((0x1 << 0) & cmd) == 0) ? "off" : "on");
pci_read_config_word(dev, PCI_STATUS, &cmd);
printk(KERN_DEBUG "pmc551: Devsel %s\n",
((PCI_STATUS_DEVSEL_MASK & cmd) == 0x000) ? "Fast" :
((PCI_STATUS_DEVSEL_MASK & cmd) == 0x200) ? "Medium" :
((PCI_STATUS_DEVSEL_MASK & cmd) == 0x400) ? "Slow" : "Invalid");
printk(KERN_DEBUG "pmc551: %sFast Back-to-Back\n",
((PCI_COMMAND_FAST_BACK & cmd) == 0) ? "Not " : "");
pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd);
printk(KERN_DEBUG "pmc551: EEPROM is under %s control\n"
"pmc551: System Control Register is %slocked to PCI access\n"
"pmc551: System Control Register is %slocked to EEPROM access\n",
(bcmd & 0x1) ? "software" : "hardware",
(bcmd & 0x20) ? "" : "un", (bcmd & 0x40) ? "" : "un");
#endif
return size;
}
/*
* Kernel version specific module stuffages
*/
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Ferrell <[email protected]>");
MODULE_DESCRIPTION(PMC551_VERSION);
/*
* Keep these outside the ifdef so as not to break compiled-in driver support
*/
static int msize = 0;
static int asize = 0;
module_param(msize, int, 0);
MODULE_PARM_DESC(msize, "memory size in MiB [1 - 1024]");
module_param(asize, int, 0);
MODULE_PARM_DESC(asize, "aperture size, must be <= memsize [1-1024]");
/*
* PMC551 Card Initialization
*/
static int __init init_pmc551(void)
{
struct pci_dev *PCI_Device = NULL;
struct mypriv *priv;
int found = 0;
struct mtd_info *mtd;
int length = 0;
if (msize) {
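/*
* Keep only the lowest set bit of msize (forcing a power of two) and
* convert from MiB to bytes; asize below is normalised the same way.
*/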
msize = (1 << (ffs(msize) - 1)) << 20;
if (msize > (1 << 30)) {
printk(KERN_NOTICE "pmc551: Invalid memory size [%d]\n",
msize);
return -EINVAL;
}
}
if (asize) {
asize = (1 << (ffs(asize) - 1)) << 20;
if (asize > (1 << 30)) {
printk(KERN_NOTICE "pmc551: Invalid aperture size "
"[%d]\n", asize);
return -EINVAL;
}
}
printk(KERN_INFO PMC551_VERSION);
/*
* PCU-bus chipset probe.
*/
for (;;) {
if ((PCI_Device = pci_get_device(PCI_VENDOR_ID_V3_SEMI,
PCI_DEVICE_ID_V3_SEMI_V370PDC,
PCI_Device)) == NULL) {
break;
}
printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%llx\n",
(unsigned long long)pci_resource_start(PCI_Device, 0));
/*
* The PMC551 device acts VERY weirdly if you don't init it
* first, i.e. it will not correctly report devsel. If for
* some reason the SDRAM is in a write-protected state the
* device will DEVSEL when it is written to, causing problems
* with the oldproc.c driver in some kernels (2.2.*).
*/
if ((length = fixup_pmc551(PCI_Device)) <= 0) {
printk(KERN_NOTICE "pmc551: Cannot init SDRAM\n");
break;
}
/*
* This is needed until the driver is capable of reading the
* onboard I2C SROM to discover the "real" memory size.
*/
if (msize) {
length = msize;
printk(KERN_NOTICE "pmc551: Using specified memory "
"size 0x%x\n", length);
} else {
msize = length;
}
mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
if (!mtd)
break;
priv = kzalloc(sizeof(struct mypriv), GFP_KERNEL);
if (!priv) {
kfree(mtd);
break;
}
mtd->priv = priv;
priv->dev = PCI_Device;
if (asize > length) {
printk(KERN_NOTICE "pmc551: reducing aperture size to "
"fit %dM\n", length >> 20);
priv->asize = asize = length;
} else if (asize == 0 || asize == length) {
printk(KERN_NOTICE "pmc551: Using existing aperture "
"size %dM\n", length >> 20);
priv->asize = asize = length;
} else {
printk(KERN_NOTICE "pmc551: Using specified aperture "
"size %dM\n", asize >> 20);
priv->asize = asize;
}
priv->start = pci_iomap(PCI_Device, 0, priv->asize);
if (!priv->start) {
printk(KERN_NOTICE "pmc551: Unable to map IO space\n");
kfree(mtd->priv);
kfree(mtd);
break;
}
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551: setting aperture to %d\n",
ffs(priv->asize >> 20) - 1);
#endif
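/*
* Build the MEM_MAP0 value: the enable bits plus the aperture size
* encoded as log2(MiB) in bits 7:4, which presumably matches the
* V370PDC register layout (see the debug readback below).
*/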
priv->base_map0 = (PMC551_PCI_MEM_MAP_REG_EN
| PMC551_PCI_MEM_MAP_ENABLE
| (ffs(priv->asize >> 20) - 1) << 4);
priv->curr_map0 = priv->base_map0;
pci_write_config_dword(priv->dev, PMC551_PCI_MEM_MAP0,
priv->curr_map0);
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551: aperture set to %d\n",
(priv->base_map0 & 0xF0) >> 4);
#endif
mtd->size = msize;
mtd->flags = MTD_CAP_RAM;
mtd->_erase = pmc551_erase;
mtd->_read = pmc551_read;
mtd->_write = pmc551_write;
mtd->_point = pmc551_point;
mtd->_unpoint = pmc551_unpoint;
mtd->type = MTD_RAM;
mtd->name = "PMC551 RAM board";
mtd->erasesize = 0x10000;
mtd->writesize = 1;
mtd->owner = THIS_MODULE;
if (mtd_device_register(mtd, NULL, 0)) {
printk(KERN_NOTICE "pmc551: Failed to register new device\n");
pci_iounmap(PCI_Device, priv->start);
kfree(mtd->priv);
kfree(mtd);
break;
}
/* Keep a reference as the mtd_device_register worked */
pci_dev_get(PCI_Device);
printk(KERN_NOTICE "Registered pmc551 memory device.\n");
printk(KERN_NOTICE "Mapped %dMiB of memory from 0x%p to 0x%p\n",
priv->asize >> 20,
priv->start, priv->start + priv->asize);
printk(KERN_NOTICE "Total memory is %d%sB\n",
(length < 1024) ? length :
(length < 1048576) ? length >> 10 : length >> 20,
(length < 1024) ? "" : (length < 1048576) ? "Ki" : "Mi");
priv->nextpmc551 = pmc551list;
pmc551list = mtd;
found++;
}
/* Exited early, reference left over */
pci_dev_put(PCI_Device);
if (!pmc551list) {
printk(KERN_NOTICE "pmc551: not detected\n");
return -ENODEV;
} else {
printk(KERN_NOTICE "pmc551: %d pmc551 devices loaded\n", found);
return 0;
}
}
/*
* PMC551 Card Cleanup
*/
static void __exit cleanup_pmc551(void)
{
int found = 0;
struct mtd_info *mtd;
struct mypriv *priv;
while ((mtd = pmc551list)) {
priv = mtd->priv;
pmc551list = priv->nextpmc551;
if (priv->start) {
printk(KERN_DEBUG "pmc551: unmapping %dMiB starting at "
"0x%p\n", priv->asize >> 20, priv->start);
pci_iounmap(priv->dev, priv->start);
}
pci_dev_put(priv->dev);
kfree(mtd->priv);
mtd_device_unregister(mtd);
kfree(mtd);
found++;
}
printk(KERN_NOTICE "pmc551: %d pmc551 devices unloaded\n", found);
}
module_init(init_pmc551);
module_exit(cleanup_pmc551);
| linux-master | drivers/mtd/devices/pmc551.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) ???? Jochen Schäuble <[email protected]>
* Copyright (c) 2003-2004 Joern Engel <[email protected]>
*
* Usage:
*
* one command line parameter per device, each in the form:
* phram=<name>,<start>,<len>[,<erasesize>]
* <name> may be up to 63 characters.
* <start>, <len>, and <erasesize> can be octal, decimal or hexadecimal. If
* followed by "ki", "Mi" or "Gi", the numbers will be interpreted as
* kibi-, mebi- or gibibytes. <erasesize> is optional and defaults to PAGE_SIZE.
*
* Example:
* phram=swap,64Mi,128Mi phram=test,900Mi,1Mi,64Ki
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/io.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <asm/div64.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of.h>
struct phram_mtd_list {
struct mtd_info mtd;
struct list_head list;
bool cached;
};
static LIST_HEAD(phram_list);
static int phram_erase(struct mtd_info *mtd, struct erase_info *instr)
{
u_char *start = mtd->priv;
memset(start + instr->addr, 0xff, instr->len);
return 0;
}
static int phram_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
*virt = mtd->priv + from;
*retlen = len;
return 0;
}
static int phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
return 0;
}
static int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
u_char *start = mtd->priv;
memcpy(buf, start + from, len);
*retlen = len;
return 0;
}
static int phram_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
u_char *start = mtd->priv;
memcpy(start + to, buf, len);
*retlen = len;
return 0;
}
static int phram_map(struct phram_mtd_list *phram, phys_addr_t start, size_t len)
{
void *addr = NULL;
if (phram->cached)
addr = memremap(start, len, MEMREMAP_WB);
else
addr = (void __force *)ioremap(start, len);
if (!addr)
return -EIO;
phram->mtd.priv = addr;
return 0;
}
static void phram_unmap(struct phram_mtd_list *phram)
{
void *addr = phram->mtd.priv;
if (phram->cached) {
memunmap(addr);
return;
}
iounmap((void __iomem *)addr);
}
static void unregister_devices(void)
{
struct phram_mtd_list *this, *safe;
list_for_each_entry_safe(this, safe, &phram_list, list) {
mtd_device_unregister(&this->mtd);
phram_unmap(this);
kfree(this->mtd.name);
kfree(this);
}
}
static int register_device(struct platform_device *pdev, const char *name,
phys_addr_t start, size_t len, uint32_t erasesize)
{
struct device_node *np = pdev ? pdev->dev.of_node : NULL;
bool cached = np ? !of_property_read_bool(np, "no-map") : false;
struct phram_mtd_list *new;
int ret = -ENOMEM;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
goto out0;
new->cached = cached;
ret = phram_map(new, start, len);
if (ret) {
pr_err("ioremap failed\n");
goto out1;
}
new->mtd.name = name;
new->mtd.size = len;
new->mtd.flags = MTD_CAP_RAM;
new->mtd._erase = phram_erase;
new->mtd._point = phram_point;
new->mtd._unpoint = phram_unpoint;
new->mtd._read = phram_read;
new->mtd._write = phram_write;
new->mtd.owner = THIS_MODULE;
new->mtd.type = MTD_RAM;
new->mtd.erasesize = erasesize;
new->mtd.writesize = 1;
mtd_set_of_node(&new->mtd, np);
ret = -EAGAIN;
if (mtd_device_register(&new->mtd, NULL, 0)) {
pr_err("Failed to register new device\n");
goto out2;
}
if (pdev)
platform_set_drvdata(pdev, new);
else
list_add_tail(&new->list, &phram_list);
return 0;
out2:
phram_unmap(new);
out1:
kfree(new);
out0:
return ret;
}
static int parse_num64(uint64_t *num64, char *token)
{
size_t len;
int shift = 0;
int ret;
len = strlen(token);
/* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
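/*
* Worked example: "128Mi" strips the suffix, sets shift to 20 and
* yields 128 << 20 bytes.
*/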
if (len > 2) {
if (token[len - 1] == 'i') {
switch (token[len - 2]) {
case 'G':
shift += 10;
fallthrough;
case 'M':
shift += 10;
fallthrough;
case 'k':
shift += 10;
token[len - 2] = 0;
break;
default:
return -EINVAL;
}
}
}
ret = kstrtou64(token, 0, num64);
*num64 <<= shift;
return ret;
}
static int parse_name(char **pname, const char *token)
{
size_t len;
char *name;
len = strlen(token) + 1;
if (len > 64)
return -ENOSPC;
name = kstrdup(token, GFP_KERNEL);
if (!name)
return -ENOMEM;
*pname = name;
return 0;
}
static inline void kill_final_newline(char *str)
{
char *newline = strrchr(str, '\n');
if (newline && !newline[1])
*newline = 0;
}
#define parse_err(fmt, args...) do { \
pr_err(fmt , ## args); \
return 1; \
} while (0)
#ifndef MODULE
static int phram_init_called;
/*
* This shall contain the module parameter if any. It is of the form:
* - phram=<device>,<address>,<size>[,<erasesize>] for module case
* - phram.phram=<device>,<address>,<size>[,<erasesize>] for built-in case
* We leave 64 bytes for the device name, 20 for the address, 20 for the
* size and 20 for the erasesize.
* Example: phram.phram=rootfs,0xa0000000,512Mi,65536
*/
static char phram_paramline[64 + 20 + 20 + 20];
#endif
static int phram_setup(const char *val)
{
char buf[64 + 20 + 20 + 20], *str = buf;
char *token[4];
char *name;
uint64_t start;
uint64_t len;
uint64_t erasesize = PAGE_SIZE;
uint32_t rem;
int i, ret;
if (strnlen(val, sizeof(buf)) >= sizeof(buf))
parse_err("parameter too long\n");
strcpy(str, val);
kill_final_newline(str);
for (i = 0; i < 4; i++)
token[i] = strsep(&str, ",");
if (str)
parse_err("too many arguments\n");
if (!token[2])
parse_err("not enough arguments\n");
ret = parse_name(&name, token[0]);
if (ret)
return ret;
ret = parse_num64(&start, token[1]);
if (ret) {
parse_err("illegal start address\n");
goto error;
}
ret = parse_num64(&len, token[2]);
if (ret) {
parse_err("illegal device length\n");
goto error;
}
if (token[3]) {
ret = parse_num64(&erasesize, token[3]);
if (ret) {
parse_err("illegal erasesize\n");
goto error;
}
}
if (len == 0 || erasesize == 0 || erasesize > len
|| erasesize > UINT_MAX) {
parse_err("illegal erasesize or len\n");
ret = -EINVAL;
goto error;
}
div_u64_rem(len, (uint32_t)erasesize, &rem);
if (rem) {
parse_err("len is not multiple of erasesize\n");
ret = -EINVAL;
goto error;
}
ret = register_device(NULL, name, start, len, (uint32_t)erasesize);
if (ret)
goto error;
pr_info("%s device: %#llx at %#llx for erasesize %#llx\n", name, len, start, erasesize);
return 0;
error:
kfree(name);
return ret;
}
static int phram_param_call(const char *val, const struct kernel_param *kp)
{
#ifdef MODULE
return phram_setup(val);
#else
/*
* If more parameters are later passed in via
* /sys/module/phram/parameters/phram
* and init_phram() has already been called,
* we can parse the argument now.
*/
if (phram_init_called)
return phram_setup(val);
/*
* During early boot stage, we only save the parameters
* here. We must parse them later: if the param passed
* from kernel boot command line, phram_param_call() is
* called so early that it is not possible to resolve
* the device (even kmalloc() fails). Defer that work to
* phram_setup().
*/
if (strlen(val) >= sizeof(phram_paramline))
return -ENOSPC;
strcpy(phram_paramline, val);
return 0;
#endif
}
module_param_call(phram, phram_param_call, NULL, NULL, 0200);
MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>[,<erasesize>]\"");
#ifdef CONFIG_OF
static const struct of_device_id phram_of_match[] = {
{ .compatible = "phram" },
{}
};
MODULE_DEVICE_TABLE(of, phram_of_match);
#endif
static int phram_probe(struct platform_device *pdev)
{
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENOMEM;
/* mtd_set_of_node() reads name from "label" */
return register_device(pdev, NULL, res->start, resource_size(res),
PAGE_SIZE);
}
static int phram_remove(struct platform_device *pdev)
{
struct phram_mtd_list *phram = platform_get_drvdata(pdev);
mtd_device_unregister(&phram->mtd);
phram_unmap(phram);
kfree(phram);
return 0;
}
static struct platform_driver phram_driver = {
.probe = phram_probe,
.remove = phram_remove,
.driver = {
.name = "phram",
.of_match_table = of_match_ptr(phram_of_match),
},
};
static int __init init_phram(void)
{
int ret;
ret = platform_driver_register(&phram_driver);
if (ret)
return ret;
#ifndef MODULE
if (phram_paramline[0])
ret = phram_setup(phram_paramline);
phram_init_called = 1;
#endif
if (ret)
platform_driver_unregister(&phram_driver);
return ret;
}
static void __exit cleanup_phram(void)
{
unregister_devices();
platform_driver_unregister(&phram_driver);
}
module_init(init_phram);
module_exit(cleanup_phram);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joern Engel <[email protected]>");
MODULE_DESCRIPTION("MTD driver for physical RAM");
| linux-master | drivers/mtd/devices/phram.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* mchp23k256.c
*
* Driver for Microchip 23k256 SPI RAM chips
*
* Copyright © 2016 Andrew Lunn <[email protected]>
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
#define MAX_CMD_SIZE 4
struct mchp23_caps {
u8 addr_width;
unsigned int size;
};
struct mchp23k256_flash {
struct spi_device *spi;
struct mutex lock;
struct mtd_info mtd;
const struct mchp23_caps *caps;
};
#define MCHP23K256_CMD_WRITE_STATUS 0x01
#define MCHP23K256_CMD_WRITE 0x02
#define MCHP23K256_CMD_READ 0x03
#define MCHP23K256_MODE_SEQ BIT(6)
#define to_mchp23k256_flash(x) container_of(x, struct mchp23k256_flash, mtd)
static void mchp23k256_addr2cmd(struct mchp23k256_flash *flash,
unsigned int addr, u8 *cmd)
{
int i;
/*
* Address is sent in big endian (MSB first) and we skip
* the first entry of the cmd array which contains the cmd
* opcode.
*/
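/*
* For example, with a 2-byte address width and addr 0x1234 this fills
* cmd[1] = 0x12 and cmd[2] = 0x34, leaving cmd[0] for the opcode.
*/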
for (i = flash->caps->addr_width; i > 0; i--, addr >>= 8)
cmd[i] = addr;
}
static int mchp23k256_cmdsz(struct mchp23k256_flash *flash)
{
return 1 + flash->caps->addr_width;
}
static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const unsigned char *buf)
{
struct mchp23k256_flash *flash = to_mchp23k256_flash(mtd);
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
int ret, cmd_len;
spi_message_init(&message);
cmd_len = mchp23k256_cmdsz(flash);
command[0] = MCHP23K256_CMD_WRITE;
mchp23k256_addr2cmd(flash, to, command);
transfer[0].tx_buf = command;
transfer[0].len = cmd_len;
spi_message_add_tail(&transfer[0], &message);
transfer[1].tx_buf = buf;
transfer[1].len = len;
spi_message_add_tail(&transfer[1], &message);
mutex_lock(&flash->lock);
ret = spi_sync(flash->spi, &message);
mutex_unlock(&flash->lock);
if (ret)
return ret;
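/*
* Count only the payload bytes actually clocked out; the command and
* address prefix is excluded from retlen.
*/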
if (retlen && message.actual_length > cmd_len)
*retlen += message.actual_length - cmd_len;
return 0;
}
static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, unsigned char *buf)
{
struct mchp23k256_flash *flash = to_mchp23k256_flash(mtd);
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
int ret, cmd_len;
spi_message_init(&message);
cmd_len = mchp23k256_cmdsz(flash);
memset(&transfer, 0, sizeof(transfer));
command[0] = MCHP23K256_CMD_READ;
mchp23k256_addr2cmd(flash, from, command);
transfer[0].tx_buf = command;
transfer[0].len = cmd_len;
spi_message_add_tail(&transfer[0], &message);
transfer[1].rx_buf = buf;
transfer[1].len = len;
spi_message_add_tail(&transfer[1], &message);
mutex_lock(&flash->lock);
ret = spi_sync(flash->spi, &message);
mutex_unlock(&flash->lock);
if (ret)
return ret;
if (retlen && message.actual_length > cmd_len)
*retlen += message.actual_length - cmd_len;
return 0;
}
/*
* Set the device into sequential mode. This allows read/writes to the
* entire SRAM in a single operation
*/
static int mchp23k256_set_mode(struct spi_device *spi)
{
struct spi_transfer transfer = {};
struct spi_message message;
unsigned char command[2];
spi_message_init(&message);
command[0] = MCHP23K256_CMD_WRITE_STATUS;
command[1] = MCHP23K256_MODE_SEQ;
transfer.tx_buf = command;
transfer.len = sizeof(command);
spi_message_add_tail(&transfer, &message);
return spi_sync(spi, &message);
}
static const struct mchp23_caps mchp23k256_caps = {
.size = SZ_32K,
.addr_width = 2,
};
static const struct mchp23_caps mchp23lcv1024_caps = {
.size = SZ_128K,
.addr_width = 3,
};
static int mchp23k256_probe(struct spi_device *spi)
{
struct mchp23k256_flash *flash;
struct flash_platform_data *data;
int err;
flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
if (!flash)
return -ENOMEM;
flash->spi = spi;
mutex_init(&flash->lock);
spi_set_drvdata(spi, flash);
err = mchp23k256_set_mode(spi);
if (err)
return err;
data = dev_get_platdata(&spi->dev);
flash->caps = of_device_get_match_data(&spi->dev);
if (!flash->caps)
flash->caps = &mchp23k256_caps;
mtd_set_of_node(&flash->mtd, spi->dev.of_node);
flash->mtd.dev.parent = &spi->dev;
flash->mtd.type = MTD_RAM;
flash->mtd.flags = MTD_CAP_RAM;
flash->mtd.writesize = 1;
flash->mtd.size = flash->caps->size;
flash->mtd._read = mchp23k256_read;
flash->mtd._write = mchp23k256_write;
err = mtd_device_register(&flash->mtd, data ? data->parts : NULL,
data ? data->nr_parts : 0);
if (err)
return err;
return 0;
}
static void mchp23k256_remove(struct spi_device *spi)
{
struct mchp23k256_flash *flash = spi_get_drvdata(spi);
WARN_ON(mtd_device_unregister(&flash->mtd));
}
static const struct of_device_id mchp23k256_of_table[] = {
{
.compatible = "microchip,mchp23k256",
.data = &mchp23k256_caps,
},
{
.compatible = "microchip,mchp23lcv1024",
.data = &mchp23lcv1024_caps,
},
{}
};
MODULE_DEVICE_TABLE(of, mchp23k256_of_table);
static const struct spi_device_id mchp23k256_spi_ids[] = {
{
.name = "mchp23k256",
.driver_data = (kernel_ulong_t)&mchp23k256_caps,
},
{
.name = "mchp23lcv1024",
.driver_data = (kernel_ulong_t)&mchp23lcv1024_caps,
},
{}
};
MODULE_DEVICE_TABLE(spi, mchp23k256_spi_ids);
static struct spi_driver mchp23k256_driver = {
.driver = {
.name = "mchp23k256",
.of_match_table = mchp23k256_of_table,
},
.probe = mchp23k256_probe,
.remove = mchp23k256_remove,
.id_table = mchp23k256_spi_ids,
};
module_spi_driver(mchp23k256_driver);
MODULE_DESCRIPTION("MTD SPI driver for MCHP23K256 RAM chips");
MODULE_AUTHOR("Andrew Lunn <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:mchp23k256");
| linux-master | drivers/mtd/devices/mchp23k256.c |
/*
* block2mtd.c - create an mtd from a block device
*
* Copyright (C) 2001,2002 Simon Evans <[email protected]>
* Copyright (C) 2004-2006 Joern Engel <[email protected]>
*
* Licence: GPL
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/*
* When the first attempt at device initialization fails, we may need to
* wait a little bit and retry. This timeout, by default 3 seconds, gives
* the device time to start up. Required on BCM2708 and a few other chipsets.
*/
#define MTD_DEFAULT_TIMEOUT 3
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/major.h>
/* Maximum number of comma-separated items in the 'block2mtd=' parameter */
#define BLOCK2MTD_PARAM_MAX_COUNT 3
/* Info for the block device */
struct block2mtd_dev {
struct list_head list;
struct block_device *blkdev;
struct mtd_info mtd;
struct mutex write_mutex;
};
/* Static info about the MTD, used in cleanup_module */
static LIST_HEAD(blkmtd_device_list);
static struct page *page_read(struct address_space *mapping, pgoff_t index)
{
return read_mapping_page(mapping, index, NULL);
}
/* erase a specified part of the device */
static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
struct page *page;
pgoff_t index = to >> PAGE_SHIFT; // page index
int pages = len >> PAGE_SHIFT;
u_long *p;
u_long *max;
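/*
* Walk the range one page at a time; a page is only dirtied when it is
* not already all 0xff, so previously erased pages cost no writeback.
*/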
while (pages) {
page = page_read(mapping, index);
if (IS_ERR(page))
return PTR_ERR(page);
max = page_address(page) + PAGE_SIZE;
for (p=page_address(page); p<max; p++)
if (*p != -1UL) {
lock_page(page);
memset(page_address(page), 0xff, PAGE_SIZE);
set_page_dirty(page);
unlock_page(page);
balance_dirty_pages_ratelimited(mapping);
break;
}
put_page(page);
pages--;
index++;
}
return 0;
}
static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct block2mtd_dev *dev = mtd->priv;
size_t from = instr->addr;
size_t len = instr->len;
int err;
mutex_lock(&dev->write_mutex);
err = _block2mtd_erase(dev, from, len);
mutex_unlock(&dev->write_mutex);
if (err)
pr_err("erase failed err = %d\n", err);
return err;
}
static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct block2mtd_dev *dev = mtd->priv;
struct page *page;
pgoff_t index = from >> PAGE_SHIFT;
int offset = from & (PAGE_SIZE-1);
int cpylen;
while (len) {
if ((offset + len) > PAGE_SIZE)
cpylen = PAGE_SIZE - offset; // multiple pages
else
cpylen = len; // this page
len = len - cpylen;
page = page_read(dev->blkdev->bd_inode->i_mapping, index);
if (IS_ERR(page))
return PTR_ERR(page);
memcpy(buf, page_address(page) + offset, cpylen);
put_page(page);
if (retlen)
*retlen += cpylen;
buf += cpylen;
offset = 0;
index++;
}
return 0;
}
/* write data to the underlying device */
static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
loff_t to, size_t len, size_t *retlen)
{
struct page *page;
struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
pgoff_t index = to >> PAGE_SHIFT; // page index
int offset = to & ~PAGE_MASK; // page offset
int cpylen;
while (len) {
if ((offset+len) > PAGE_SIZE)
cpylen = PAGE_SIZE - offset; // multiple pages
else
cpylen = len; // this page
len = len - cpylen;
page = page_read(mapping, index);
if (IS_ERR(page))
return PTR_ERR(page);
if (memcmp(page_address(page)+offset, buf, cpylen)) {
lock_page(page);
memcpy(page_address(page) + offset, buf, cpylen);
set_page_dirty(page);
unlock_page(page);
balance_dirty_pages_ratelimited(mapping);
}
put_page(page);
if (retlen)
*retlen += cpylen;
buf += cpylen;
offset = 0;
index++;
}
return 0;
}
static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct block2mtd_dev *dev = mtd->priv;
int err;
mutex_lock(&dev->write_mutex);
err = _block2mtd_write(dev, buf, to, len, retlen);
mutex_unlock(&dev->write_mutex);
if (err > 0)
err = 0;
return err;
}
/* sync the device - wait until the write queue is empty */
static void block2mtd_sync(struct mtd_info *mtd)
{
struct block2mtd_dev *dev = mtd->priv;
sync_blockdev(dev->blkdev);
return;
}
static void block2mtd_free_device(struct block2mtd_dev *dev)
{
if (!dev)
return;
kfree(dev->mtd.name);
if (dev->blkdev) {
invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
0, -1);
blkdev_put(dev->blkdev, NULL);
}
kfree(dev);
}
/*
* This function is marked __ref because it calls the __init marked
* early_lookup_bdev when called from the early boot code.
*/
static struct block_device __ref *mdtblock_early_get_bdev(const char *devname,
blk_mode_t mode, int timeout, struct block2mtd_dev *dev)
{
struct block_device *bdev = ERR_PTR(-ENODEV);
#ifndef MODULE
int i;
/*
* We can't use early_lookup_bdev from a running system.
*/
if (system_state >= SYSTEM_RUNNING)
return bdev;
/*
* We might not have the root device mounted at this point.
* Try to resolve the device name by other means.
*/
for (i = 0; i <= timeout; i++) {
dev_t devt;
if (i)
/*
* Calling wait_for_device_probe in the first loop
* was not enough, sleep for a bit in subsequent
* go-arounds.
*/
msleep(1000);
wait_for_device_probe();
if (!early_lookup_bdev(devname, &devt)) {
bdev = blkdev_get_by_dev(devt, mode, dev, NULL);
if (!IS_ERR(bdev))
break;
}
}
#endif
return bdev;
}
static struct block2mtd_dev *add_device(char *devname, int erase_size,
char *label, int timeout)
{
const blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE;
struct block_device *bdev;
struct block2mtd_dev *dev;
char *name;
if (!devname)
return NULL;
dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
if (!dev)
return NULL;
/* Get a handle on the device */
bdev = blkdev_get_by_path(devname, mode, dev, NULL);
if (IS_ERR(bdev))
bdev = mdtblock_early_get_bdev(devname, mode, timeout, dev);
if (IS_ERR(bdev)) {
pr_err("error: cannot open device %s\n", devname);
goto err_free_block2mtd;
}
dev->blkdev = bdev;
if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
pr_err("attempting to use an MTD device as a block device\n");
goto err_free_block2mtd;
}
if ((long)dev->blkdev->bd_inode->i_size % erase_size) {
pr_err("erasesize must be a divisor of device size\n");
goto err_free_block2mtd;
}
mutex_init(&dev->write_mutex);
/* Setup the MTD structure */
/* make the name contain the block device it is based on */
if (!label)
name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
else
name = kstrdup(label, GFP_KERNEL);
if (!name)
goto err_destroy_mutex;
dev->mtd.name = name;
dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
dev->mtd.erasesize = erase_size;
dev->mtd.writesize = 1;
dev->mtd.writebufsize = PAGE_SIZE;
dev->mtd.type = MTD_RAM;
dev->mtd.flags = MTD_CAP_RAM;
dev->mtd._erase = block2mtd_erase;
dev->mtd._write = block2mtd_write;
dev->mtd._sync = block2mtd_sync;
dev->mtd._read = block2mtd_read;
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
if (mtd_device_register(&dev->mtd, NULL, 0)) {
/* Device didn't get added, so free the entry */
goto err_destroy_mutex;
}
list_add(&dev->list, &blkmtd_device_list);
pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
dev->mtd.index,
label ? label : dev->mtd.name + strlen("block2mtd: "),
dev->mtd.erasesize >> 10, dev->mtd.erasesize);
return dev;
err_destroy_mutex:
mutex_destroy(&dev->write_mutex);
err_free_block2mtd:
block2mtd_free_device(dev);
return NULL;
}
/* This function works similarly to the regular strtoul(). In addition, it
* allows some suffixes for a more human-readable number format:
* ki, Ki, kiB, KiB - multiply result with 1024
* Mi, MiB - multiply result with 1024^2
* Gi, GiB - multiply result with 1024^3
*/
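/* Illustrative examples: "64Ki" parses to 65536 and "1MiB" to 1048576. */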
static int ustrtoul(const char *cp, char **endp, unsigned int base)
{
unsigned long result = simple_strtoul(cp, endp, base);
switch (**endp) {
case 'G' :
result *= 1024;
fallthrough;
case 'M':
result *= 1024;
fallthrough;
case 'K':
case 'k':
result *= 1024;
/* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
if ((*endp)[1] == 'i') {
if ((*endp)[2] == 'B')
(*endp) += 3;
else
(*endp) += 2;
}
}
return result;
}
static int parse_num(size_t *num, const char *token)
{
char *endp;
size_t n;
n = (size_t) ustrtoul(token, &endp, 0);
if (*endp)
return -EINVAL;
*num = n;
return 0;
}
static inline void kill_final_newline(char *str)
{
char *newline = strrchr(str, '\n');
if (newline && !newline[1])
*newline = 0;
}
#ifndef MODULE
static int block2mtd_init_called = 0;
/* 80 for device, 12 for erase size */
static char block2mtd_paramline[80 + 12];
#endif
static int block2mtd_setup2(const char *val)
{
/* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
char buf[80 + 12 + 80 + 8];
char *str = buf;
char *token[BLOCK2MTD_PARAM_MAX_COUNT];
char *name;
char *label = NULL;
size_t erase_size = PAGE_SIZE;
unsigned long timeout = MTD_DEFAULT_TIMEOUT;
int i, ret;
if (strnlen(val, sizeof(buf)) >= sizeof(buf)) {
pr_err("parameter too long\n");
return 0;
}
strcpy(str, val);
kill_final_newline(str);
for (i = 0; i < BLOCK2MTD_PARAM_MAX_COUNT; i++)
token[i] = strsep(&str, ",");
if (str) {
pr_err("too many arguments\n");
return 0;
}
if (!token[0]) {
pr_err("no argument\n");
return 0;
}
name = token[0];
if (strlen(name) + 1 > 80) {
pr_err("device name too long\n");
return 0;
}
/* Optional argument when custom label is used */
if (token[1] && strlen(token[1])) {
ret = parse_num(&erase_size, token[1]);
if (ret) {
pr_err("illegal erase size\n");
return 0;
}
}
if (token[2]) {
label = token[2];
pr_info("Using custom MTD label '%s' for dev %s\n", label, name);
}
add_device(name, erase_size, label, timeout);
return 0;
}
static int block2mtd_setup(const char *val, const struct kernel_param *kp)
{
#ifdef MODULE
return block2mtd_setup2(val);
#else
/* If more parameters are later passed in via
/sys/module/block2mtd/parameters/block2mtd
and block2mtd_init() has already been called,
we can parse the argument now. */
if (block2mtd_init_called)
return block2mtd_setup2(val);
/* During early boot stage, we only save the parameters
here. We must parse them later: if the param passed
from kernel boot command line, block2mtd_setup() is
called so early that it is not possible to resolve
the device (even kmalloc() fails). Defer that work to
block2mtd_setup2(). */
strscpy(block2mtd_paramline, val, sizeof(block2mtd_paramline));
return 0;
#endif
}
module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,[<erasesize>][,<label>]]\"");
static int __init block2mtd_init(void)
{
int ret = 0;
#ifndef MODULE
if (strlen(block2mtd_paramline))
ret = block2mtd_setup2(block2mtd_paramline);
block2mtd_init_called = 1;
#endif
return ret;
}
static void block2mtd_exit(void)
{
struct list_head *pos, *next;
/* Remove the MTD devices */
list_for_each_safe(pos, next, &blkmtd_device_list) {
struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
block2mtd_sync(&dev->mtd);
mtd_device_unregister(&dev->mtd);
mutex_destroy(&dev->write_mutex);
pr_info("mtd%d: [%s] removed\n",
dev->mtd.index,
dev->mtd.name + strlen("block2mtd: "));
list_del(&dev->list);
block2mtd_free_device(dev);
}
}
late_initcall(block2mtd_init);
module_exit(block2mtd_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joern Engel <[email protected]>");
MODULE_DESCRIPTION("Emulate an MTD using a block device");
| linux-master | drivers/mtd/devices/block2mtd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Microchip 48L640 64 Kb SPI Serial EERAM
*
* Copyright Heiko Schocher <[email protected]>
*
* datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20006055B.pdf
*
* we set continuous mode, but reading/writing more bytes than the
* page size seems to bring the chip into a state where the values
* read back are wrong ... no idea why.
*
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
struct mchp48_caps {
unsigned int size;
unsigned int page_size;
};
struct mchp48l640_flash {
struct spi_device *spi;
struct mutex lock;
struct mtd_info mtd;
const struct mchp48_caps *caps;
};
#define MCHP48L640_CMD_WREN 0x06
#define MCHP48L640_CMD_WRDI 0x04
#define MCHP48L640_CMD_WRITE 0x02
#define MCHP48L640_CMD_READ 0x03
#define MCHP48L640_CMD_WRSR 0x01
#define MCHP48L640_CMD_RDSR 0x05
#define MCHP48L640_STATUS_RDY 0x01
#define MCHP48L640_STATUS_WEL 0x02
#define MCHP48L640_STATUS_BP0 0x04
#define MCHP48L640_STATUS_BP1 0x08
#define MCHP48L640_STATUS_SWM 0x10
#define MCHP48L640_STATUS_PRO 0x20
#define MCHP48L640_STATUS_ASE 0x40
#define MCHP48L640_TIMEOUT 100
#define MAX_CMD_SIZE 0x10
#define to_mchp48l640_flash(x) container_of(x, struct mchp48l640_flash, mtd)
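/*
* Build a <opcode, addr_hi, addr_lo> command buffer with the 16-bit
* address in big-endian order; returns the command length (3 bytes).
*/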
static int mchp48l640_mkcmd(struct mchp48l640_flash *flash, u8 cmd, loff_t addr, char *buf)
{
buf[0] = cmd;
buf[1] = addr >> 8;
buf[2] = addr;
return 3;
}
static int mchp48l640_read_status(struct mchp48l640_flash *flash, int *status)
{
unsigned char cmd[2];
int ret;
cmd[0] = MCHP48L640_CMD_RDSR;
cmd[1] = 0x00;
mutex_lock(&flash->lock);
ret = spi_write_then_read(flash->spi, &cmd[0], 1, &cmd[1], 1);
mutex_unlock(&flash->lock);
if (!ret)
*status = cmd[1];
dev_dbg(&flash->spi->dev, "read status ret: %d status: %x", ret, *status);
return ret;
}
static int mchp48l640_waitforbit(struct mchp48l640_flash *flash, int bit, bool set)
{
int ret, status;
unsigned long deadline;
deadline = jiffies + msecs_to_jiffies(MCHP48L640_TIMEOUT);
do {
ret = mchp48l640_read_status(flash, &status);
dev_dbg(&flash->spi->dev, "read status ret: %d bit: %x %sset status: %x",
ret, bit, (set ? "" : "not"), status);
if (ret)
return ret;
if (set) {
if ((status & bit) == bit)
return 0;
} else {
if ((status & bit) == 0)
return 0;
}
usleep_range(1000, 2000);
} while (!time_after_eq(jiffies, deadline));
dev_err(&flash->spi->dev, "Timeout waiting for bit %x %s set in status register.",
bit, (set ? "" : "not"));
return -ETIMEDOUT;
}
static int mchp48l640_write_prepare(struct mchp48l640_flash *flash, bool enable)
{
unsigned char cmd[2];
int ret;
if (enable)
cmd[0] = MCHP48L640_CMD_WREN;
else
cmd[0] = MCHP48L640_CMD_WRDI;
mutex_lock(&flash->lock);
ret = spi_write(flash->spi, cmd, 1);
mutex_unlock(&flash->lock);
if (ret)
dev_err(&flash->spi->dev, "write %sable failed ret: %d",
(enable ? "en" : "dis"), ret);
dev_dbg(&flash->spi->dev, "write %sable success ret: %d",
(enable ? "en" : "dis"), ret);
if (enable)
return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, true);
return ret;
}
static int mchp48l640_set_mode(struct mchp48l640_flash *flash)
{
unsigned char cmd[2];
int ret;
ret = mchp48l640_write_prepare(flash, true);
if (ret)
return ret;
cmd[0] = MCHP48L640_CMD_WRSR;
cmd[1] = MCHP48L640_STATUS_PRO;
mutex_lock(&flash->lock);
ret = spi_write(flash->spi, cmd, 2);
mutex_unlock(&flash->lock);
if (ret)
dev_err(&flash->spi->dev, "Could not set continuous mode ret: %d", ret);
return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_PRO, true);
}
static int mchp48l640_wait_rdy(struct mchp48l640_flash *flash)
{
return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_RDY, false);
};
static int mchp48l640_write_page(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const unsigned char *buf)
{
struct mchp48l640_flash *flash = to_mchp48l640_flash(mtd);
unsigned char *cmd;
int ret;
int cmdlen;
cmd = kmalloc((3 + len), GFP_KERNEL | GFP_DMA);
if (!cmd)
return -ENOMEM;
ret = mchp48l640_wait_rdy(flash);
if (ret)
goto fail;
ret = mchp48l640_write_prepare(flash, true);
if (ret)
goto fail;
mutex_lock(&flash->lock);
cmdlen = mchp48l640_mkcmd(flash, MCHP48L640_CMD_WRITE, to, cmd);
memcpy(&cmd[cmdlen], buf, len);
ret = spi_write(flash->spi, cmd, cmdlen + len);
mutex_unlock(&flash->lock);
if (!ret)
*retlen += len;
else
goto fail;
ret = mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, false);
if (ret)
goto fail;
kfree(cmd);
return 0;
fail:
kfree(cmd);
dev_err(&flash->spi->dev, "write fail with: %d", ret);
return ret;
};
static int mchp48l640_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const unsigned char *buf)
{
struct mchp48l640_flash *flash = to_mchp48l640_flash(mtd);
int ret;
size_t wlen = 0;
loff_t woff = to;
size_t ws;
size_t page_sz = flash->caps->page_size;
/*
* we set the PRO bit (page rollover), but writing more than the page
* size results in total chaos, so write in 32 byte chunks.
*/
while (wlen < len) {
ws = min((len - wlen), page_sz);
ret = mchp48l640_write_page(mtd, woff, ws, retlen, &buf[wlen]);
if (ret)
return ret;
wlen += ws;
woff += ws;
}
return 0;
}
static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, unsigned char *buf)
{
struct mchp48l640_flash *flash = to_mchp48l640_flash(mtd);
unsigned char *cmd;
int ret;
int cmdlen;
cmd = kmalloc((3 + len), GFP_KERNEL | GFP_DMA);
if (!cmd)
return -ENOMEM;
ret = mchp48l640_wait_rdy(flash);
if (ret)
goto fail;
mutex_lock(&flash->lock);
cmdlen = mchp48l640_mkcmd(flash, MCHP48L640_CMD_READ, from, cmd);
ret = spi_write_then_read(flash->spi, cmd, cmdlen, buf, len);
mutex_unlock(&flash->lock);
if (!ret)
*retlen += len;
kfree(cmd);
return ret;
fail:
kfree(cmd);
dev_err(&flash->spi->dev, "read fail with: %d", ret);
return ret;
}
static int mchp48l640_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, unsigned char *buf)
{
struct mchp48l640_flash *flash = to_mchp48l640_flash(mtd);
int ret;
size_t wlen = 0;
loff_t woff = from;
size_t ws;
size_t page_sz = flash->caps->page_size;
/*
* we set the PRO bit (page rollover), but reading more than the page
* size results in total chaos in the returned data, so read in 32 byte
* chunks.
*/
while (wlen < len) {
ws = min((len - wlen), page_sz);
ret = mchp48l640_read_page(mtd, woff, ws, retlen, &buf[wlen]);
if (ret)
return ret;
wlen += ws;
woff += ws;
}
return 0;
};
static const struct mchp48_caps mchp48l640_caps = {
.size = SZ_8K,
.page_size = 32,
};
static int mchp48l640_probe(struct spi_device *spi)
{
struct mchp48l640_flash *flash;
struct flash_platform_data *data;
int err;
int status;
flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
if (!flash)
return -ENOMEM;
flash->spi = spi;
mutex_init(&flash->lock);
spi_set_drvdata(spi, flash);
err = mchp48l640_read_status(flash, &status);
if (err)
return err;
err = mchp48l640_set_mode(flash);
if (err)
return err;
data = dev_get_platdata(&spi->dev);
flash->caps = of_device_get_match_data(&spi->dev);
if (!flash->caps)
flash->caps = &mchp48l640_caps;
mtd_set_of_node(&flash->mtd, spi->dev.of_node);
flash->mtd.dev.parent = &spi->dev;
flash->mtd.type = MTD_RAM;
flash->mtd.flags = MTD_CAP_RAM;
flash->mtd.writesize = flash->caps->page_size;
flash->mtd.size = flash->caps->size;
flash->mtd._read = mchp48l640_read;
flash->mtd._write = mchp48l640_write;
err = mtd_device_register(&flash->mtd, data ? data->parts : NULL,
data ? data->nr_parts : 0);
if (err)
return err;
return 0;
}
static void mchp48l640_remove(struct spi_device *spi)
{
struct mchp48l640_flash *flash = spi_get_drvdata(spi);
WARN_ON(mtd_device_unregister(&flash->mtd));
}
static const struct of_device_id mchp48l640_of_table[] = {
{
.compatible = "microchip,48l640",
.data = &mchp48l640_caps,
},
{}
};
MODULE_DEVICE_TABLE(of, mchp48l640_of_table);
static const struct spi_device_id mchp48l640_spi_ids[] = {
{
.name = "48l640",
.driver_data = (kernel_ulong_t)&mchp48l640_caps,
},
{}
};
MODULE_DEVICE_TABLE(spi, mchp48l640_spi_ids);
static struct spi_driver mchp48l640_driver = {
.driver = {
.name = "mchp48l640",
.of_match_table = mchp48l640_of_table,
},
.probe = mchp48l640_probe,
.remove = mchp48l640_remove,
.id_table = mchp48l640_spi_ids,
};
module_spi_driver(mchp48l640_driver);
MODULE_DESCRIPTION("MTD SPI driver for Microchip 48l640 EERAM chips");
MODULE_AUTHOR("Heiko Schocher <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:mchp48l640");
| linux-master | drivers/mtd/devices/mchp48l640.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2001 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/kn02.h>
#include <asm/dec/kn03.h>
#include <asm/io.h>
#include <asm/paccess.h>
#include "ms02-nv.h"
static char version[] __initdata =
"ms02-nv.c: v.1.0.0 13 Aug 2001 Maciej W. Rozycki.\n";
MODULE_AUTHOR("Maciej W. Rozycki <[email protected]>");
MODULE_DESCRIPTION("DEC MS02-NV NVRAM module driver");
MODULE_LICENSE("GPL");
/*
* Addresses we probe for an MS02-NV at. Modules may be located
* at any 8MiB boundary within a 0MiB up to 112MiB range or at any 32MiB
* boundary within a 0MiB up to 448MiB range. We don't support a module
* at 0MiB, though.
*/
static ulong ms02nv_addrs[] __initdata = {
0x07000000, 0x06800000, 0x06000000, 0x05800000, 0x05000000,
0x04800000, 0x04000000, 0x03800000, 0x03000000, 0x02800000,
0x02000000, 0x01800000, 0x01000000, 0x00800000
};
static const char ms02nv_name[] = "DEC MS02-NV NVRAM";
static const char ms02nv_res_diag_ram[] = "Diagnostic RAM";
static const char ms02nv_res_user_ram[] = "General-purpose RAM";
static const char ms02nv_res_csr[] = "Control and status register";
static struct mtd_info *root_ms02nv_mtd;
static int ms02nv_read(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct ms02nv_private *mp = mtd->priv;
memcpy(buf, mp->uaddr + from, len);
*retlen = len;
return 0;
}
static int ms02nv_write(struct mtd_info *mtd, loff_t to,
size_t len, size_t *retlen, const u_char *buf)
{
struct ms02nv_private *mp = mtd->priv;
memcpy(mp->uaddr + to, buf, len);
*retlen = len;
return 0;
}
static inline uint ms02nv_probe_one(ulong addr)
{
ms02nv_uint *ms02nv_diagp;
ms02nv_uint *ms02nv_magicp;
uint ms02nv_diag;
uint ms02nv_magic;
size_t size;
int err;
/*
* The firmware writes MS02NV_ID at MS02NV_MAGIC and also
* a diagnostic status at MS02NV_DIAG.
*/
ms02nv_diagp = (ms02nv_uint *)(CKSEG1ADDR(addr + MS02NV_DIAG));
ms02nv_magicp = (ms02nv_uint *)(CKSEG1ADDR(addr + MS02NV_MAGIC));
err = get_dbe(ms02nv_magic, ms02nv_magicp);
if (err)
return 0;
if (ms02nv_magic != MS02NV_ID)
return 0;
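/*
* The diagnostic word encodes the module size; clamp it so the usable
* RAM never extends into the CSR area.
*/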
ms02nv_diag = *ms02nv_diagp;
size = (ms02nv_diag & MS02NV_DIAG_SIZE_MASK) << MS02NV_DIAG_SIZE_SHIFT;
if (size > MS02NV_CSR)
size = MS02NV_CSR;
return size;
}
static int __init ms02nv_init_one(ulong addr)
{
struct mtd_info *mtd;
struct ms02nv_private *mp;
struct resource *mod_res;
struct resource *diag_res;
struct resource *user_res;
struct resource *csr_res;
ulong fixaddr;
size_t size, fixsize;
static int version_printed;
int ret = -ENODEV;
/* The module decodes 8MiB of address space. */
mod_res = kzalloc(sizeof(*mod_res), GFP_KERNEL);
if (!mod_res)
return -ENOMEM;
mod_res->name = ms02nv_name;
mod_res->start = addr;
mod_res->end = addr + MS02NV_SLOT_SIZE - 1;
mod_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, mod_res) < 0)
goto err_out_mod_res;
size = ms02nv_probe_one(addr);
if (!size)
goto err_out_mod_res_rel;
if (!version_printed) {
printk(KERN_INFO "%s", version);
version_printed = 1;
}
ret = -ENOMEM;
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
if (!mtd)
goto err_out_mod_res_rel;
mp = kzalloc(sizeof(*mp), GFP_KERNEL);
if (!mp)
goto err_out_mtd;
mtd->priv = mp;
mp->resource.module = mod_res;
/* Firmware's diagnostic NVRAM area. */
diag_res = kzalloc(sizeof(*diag_res), GFP_KERNEL);
if (!diag_res)
goto err_out_mp;
diag_res->name = ms02nv_res_diag_ram;
diag_res->start = addr;
diag_res->end = addr + MS02NV_RAM - 1;
diag_res->flags = IORESOURCE_BUSY;
request_resource(mod_res, diag_res);
mp->resource.diag_ram = diag_res;
/* User-available general-purpose NVRAM area. */
user_res = kzalloc(sizeof(*user_res), GFP_KERNEL);
if (!user_res)
goto err_out_diag_res;
user_res->name = ms02nv_res_user_ram;
user_res->start = addr + MS02NV_RAM;
user_res->end = addr + size - 1;
user_res->flags = IORESOURCE_BUSY;
request_resource(mod_res, user_res);
mp->resource.user_ram = user_res;
/* Control and status register. */
csr_res = kzalloc(sizeof(*csr_res), GFP_KERNEL);
if (!csr_res)
goto err_out_user_res;
csr_res->name = ms02nv_res_csr;
csr_res->start = addr + MS02NV_CSR;
csr_res->end = addr + MS02NV_CSR + 3;
csr_res->flags = IORESOURCE_BUSY;
request_resource(mod_res, csr_res);
mp->resource.csr = csr_res;
mp->addr = phys_to_virt(addr);
mp->size = size;
/*
* Hide the firmware's diagnostic area. It may get destroyed
* upon a reboot. Take paging into account for mapping support.
*/
fixaddr = (addr + MS02NV_RAM + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
fixsize = (size - (fixaddr - addr)) & ~(PAGE_SIZE - 1);
mp->uaddr = phys_to_virt(fixaddr);
mtd->type = MTD_RAM;
mtd->flags = MTD_CAP_RAM;
mtd->size = fixsize;
mtd->name = ms02nv_name;
mtd->owner = THIS_MODULE;
mtd->_read = ms02nv_read;
mtd->_write = ms02nv_write;
mtd->writesize = 1;
ret = -EIO;
if (mtd_device_register(mtd, NULL, 0)) {
printk(KERN_ERR
"ms02-nv: Unable to register MTD device, aborting!\n");
goto err_out_csr_res;
}
printk(KERN_INFO "mtd%d: %s at 0x%08lx, size %zuMiB.\n",
mtd->index, ms02nv_name, addr, size >> 20);
mp->next = root_ms02nv_mtd;
root_ms02nv_mtd = mtd;
return 0;
err_out_csr_res:
release_resource(csr_res);
kfree(csr_res);
err_out_user_res:
release_resource(user_res);
kfree(user_res);
err_out_diag_res:
release_resource(diag_res);
kfree(diag_res);
err_out_mp:
kfree(mp);
err_out_mtd:
kfree(mtd);
err_out_mod_res_rel:
release_resource(mod_res);
err_out_mod_res:
kfree(mod_res);
return ret;
}
static void __exit ms02nv_remove_one(void)
{
struct mtd_info *mtd = root_ms02nv_mtd;
struct ms02nv_private *mp = mtd->priv;
root_ms02nv_mtd = mp->next;
mtd_device_unregister(mtd);
release_resource(mp->resource.csr);
kfree(mp->resource.csr);
release_resource(mp->resource.user_ram);
kfree(mp->resource.user_ram);
release_resource(mp->resource.diag_ram);
kfree(mp->resource.diag_ram);
release_resource(mp->resource.module);
kfree(mp->resource.module);
kfree(mp);
kfree(mtd);
}
static int __init ms02nv_init(void)
{
volatile u32 *csr;
uint stride = 0;
int count = 0;
int i;
switch (mips_machtype) {
case MACH_DS5000_200:
csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR);
if (*csr & KN02_CSR_BNK32M)
stride = 2;
break;
case MACH_DS5000_2X0:
case MACH_DS5900:
csr = (volatile u32 *)CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_MCR);
if (*csr & KN03_MCR_BNK32M)
stride = 2;
break;
default:
return -ENODEV;
}
for (i = 0; i < ARRAY_SIZE(ms02nv_addrs); i++)
if (!ms02nv_init_one(ms02nv_addrs[i] << stride))
count++;
return (count > 0) ? 0 : -ENODEV;
}
static void __exit ms02nv_cleanup(void)
{
while (root_ms02nv_mtd)
ms02nv_remove_one();
}
module_init(ms02nv_init);
module_exit(ms02nv_cleanup);
| linux-master | drivers/mtd/devices/ms02-nv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* sst25l.c
*
* Driver for SST25L SPI Flash chips
*
* Copyright © 2009 Bluewater Systems Ltd
* Author: Andre Renaud <[email protected]>
* Author: Ryan Mallon
*
* Based on m25p80.c
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
/* Erases can take up to 3 seconds! */
#define MAX_READY_WAIT_JIFFIES msecs_to_jiffies(3000)
#define SST25L_CMD_WRSR 0x01 /* Write status register */
#define SST25L_CMD_WRDI 0x04 /* Write disable */
#define SST25L_CMD_RDSR 0x05 /* Read status register */
#define SST25L_CMD_WREN 0x06 /* Write enable */
#define SST25L_CMD_READ 0x03 /* High speed read */
#define SST25L_CMD_EWSR 0x50 /* Enable write status register */
#define SST25L_CMD_SECTOR_ERASE 0x20 /* Erase sector */
#define SST25L_CMD_READ_ID 0x90 /* Read device ID */
#define SST25L_CMD_AAI_PROGRAM 0xaf /* Auto address increment */
#define SST25L_STATUS_BUSY (1 << 0) /* Chip is busy */
#define SST25L_STATUS_WREN (1 << 1) /* Write enabled */
#define SST25L_STATUS_BP0 (1 << 2) /* Block protection 0 */
#define SST25L_STATUS_BP1 (1 << 3) /* Block protection 1 */
struct sst25l_flash {
struct spi_device *spi;
struct mutex lock;
struct mtd_info mtd;
};
struct flash_info {
const char *name;
uint16_t device_id;
unsigned page_size;
unsigned nr_pages;
unsigned erase_size;
};
#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd)
static struct flash_info sst25l_flash_info[] = {
{"sst25lf020a", 0xbf43, 256, 1024, 4096},
{"sst25lf040a", 0xbf44, 256, 2048, 4096},
};
static int sst25l_status(struct sst25l_flash *flash, int *status)
{
struct spi_message m;
struct spi_transfer t;
unsigned char cmd_resp[2];
int err;
spi_message_init(&m);
memset(&t, 0, sizeof(struct spi_transfer));
cmd_resp[0] = SST25L_CMD_RDSR;
cmd_resp[1] = 0xff;
t.tx_buf = cmd_resp;
t.rx_buf = cmd_resp;
t.len = sizeof(cmd_resp);
spi_message_add_tail(&t, &m);
err = spi_sync(flash->spi, &m);
if (err < 0)
return err;
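/*
 * The transfer above is full duplex with tx_buf == rx_buf: while the RDSR
 * opcode and a dummy byte are shifted out, the flash shifts its status
 * register back in, so cmd_resp[1] now holds that value.
 */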
*status = cmd_resp[1];
return 0;
}
static int sst25l_write_enable(struct sst25l_flash *flash, int enable)
{
unsigned char command[2];
int status, err;
command[0] = enable ? SST25L_CMD_WREN : SST25L_CMD_WRDI;
err = spi_write(flash->spi, command, 1);
if (err)
return err;
command[0] = SST25L_CMD_EWSR;
err = spi_write(flash->spi, command, 1);
if (err)
return err;
command[0] = SST25L_CMD_WRSR;
command[1] = enable ? 0 : SST25L_STATUS_BP0 | SST25L_STATUS_BP1;
err = spi_write(flash->spi, command, 2);
if (err)
return err;
if (enable) {
err = sst25l_status(flash, &status);
if (err)
return err;
if (!(status & SST25L_STATUS_WREN))
return -EROFS;
}
return 0;
}
static int sst25l_wait_till_ready(struct sst25l_flash *flash)
{
unsigned long deadline;
int status, err;
deadline = jiffies + MAX_READY_WAIT_JIFFIES;
do {
err = sst25l_status(flash, &status);
if (err)
return err;
if (!(status & SST25L_STATUS_BUSY))
return 0;
cond_resched();
} while (!time_after_eq(jiffies, deadline));
return -ETIMEDOUT;
}
static int sst25l_erase_sector(struct sst25l_flash *flash, uint32_t offset)
{
unsigned char command[4];
int err;
err = sst25l_write_enable(flash, 1);
if (err)
return err;
command[0] = SST25L_CMD_SECTOR_ERASE;
command[1] = offset >> 16;
command[2] = offset >> 8;
command[3] = offset;
err = spi_write(flash->spi, command, 4);
if (err)
return err;
err = sst25l_wait_till_ready(flash);
if (err)
return err;
return sst25l_write_enable(flash, 0);
}
static int sst25l_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct sst25l_flash *flash = to_sst25l_flash(mtd);
uint32_t addr, end;
int err;
/* Sanity checks */
if ((uint32_t)instr->len % mtd->erasesize)
return -EINVAL;
if ((uint32_t)instr->addr % mtd->erasesize)
return -EINVAL;
addr = instr->addr;
end = addr + instr->len;
mutex_lock(&flash->lock);
err = sst25l_wait_till_ready(flash);
if (err) {
mutex_unlock(&flash->lock);
return err;
}
while (addr < end) {
err = sst25l_erase_sector(flash, addr);
if (err) {
mutex_unlock(&flash->lock);
dev_err(&flash->spi->dev, "Erase failed\n");
return err;
}
addr += mtd->erasesize;
}
mutex_unlock(&flash->lock);
return 0;
}
static int sst25l_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, unsigned char *buf)
{
struct sst25l_flash *flash = to_sst25l_flash(mtd);
struct spi_transfer transfer[2];
struct spi_message message;
unsigned char command[4];
int ret;
spi_message_init(&message);
memset(&transfer, 0, sizeof(transfer));
command[0] = SST25L_CMD_READ;
command[1] = from >> 16;
command[2] = from >> 8;
command[3] = from;
transfer[0].tx_buf = command;
transfer[0].len = sizeof(command);
spi_message_add_tail(&transfer[0], &message);
transfer[1].rx_buf = buf;
transfer[1].len = len;
spi_message_add_tail(&transfer[1], &message);
mutex_lock(&flash->lock);
/* Wait for previous write/erase to complete */
ret = sst25l_wait_till_ready(flash);
if (ret) {
mutex_unlock(&flash->lock);
return ret;
}
spi_sync(flash->spi, &message);
if (retlen && message.actual_length > sizeof(command))
*retlen += message.actual_length - sizeof(command);
mutex_unlock(&flash->lock);
return 0;
}
static int sst25l_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const unsigned char *buf)
{
struct sst25l_flash *flash = to_sst25l_flash(mtd);
int i, j, ret, bytes, copied = 0;
unsigned char command[5];
if ((uint32_t)to % mtd->writesize)
return -EINVAL;
mutex_lock(&flash->lock);
ret = sst25l_write_enable(flash, 1);
if (ret)
goto out;
for (i = 0; i < len; i += mtd->writesize) {
ret = sst25l_wait_till_ready(flash);
if (ret)
goto out;
/* Write the first byte of the page */
command[0] = SST25L_CMD_AAI_PROGRAM;
command[1] = (to + i) >> 16;
command[2] = (to + i) >> 8;
command[3] = (to + i);
command[4] = buf[i];
ret = spi_write(flash->spi, command, 5);
if (ret < 0)
goto out;
copied++;
/*
* Write the remaining bytes using auto address
* increment mode
*/
bytes = min_t(uint32_t, mtd->writesize, len - i);
for (j = 1; j < bytes; j++, copied++) {
ret = sst25l_wait_till_ready(flash);
if (ret)
goto out;
command[1] = buf[i + j];
ret = spi_write(flash->spi, command, 2);
if (ret)
goto out;
}
}
out:
ret = sst25l_write_enable(flash, 0);
if (retlen)
*retlen = copied;
mutex_unlock(&flash->lock);
return ret;
}
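/*
 * Editor's illustrative sketch, not part of the original driver: how a
 * hypothetical caller could exercise the AAI write path above through the
 * generic MTD API.  The function name, offset and fill pattern are made up;
 * the only real constraint taken from sst25l_write() is that the destination
 * must be aligned to mtd->writesize.  The block is guarded out so it cannot
 * be mistaken for driver code.
 */
#if 0
static int sst25l_example_fill_page(struct mtd_info *mtd, loff_t page_ofs)
{
	size_t retlen = 0;
	u8 *buf;
	int err;

	/* sst25l_write() rejects unaligned destinations with -EINVAL. */
	if ((u32)page_ofs % mtd->writesize)
		return -EINVAL;

	buf = kmalloc(mtd->writesize, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memset(buf, 0xa5, mtd->writesize);

	/*
	 * The first byte of the page goes out with a full 3-byte address;
	 * the remaining bytes stream out one at a time under
	 * SST25L_CMD_AAI_PROGRAM.
	 */
	err = mtd_write(mtd, page_ofs, mtd->writesize, &retlen, buf);
	kfree(buf);
	if (err)
		return err;
	return retlen == mtd->writesize ? 0 : -EIO;
}
#endif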
static struct flash_info *sst25l_match_device(struct spi_device *spi)
{
struct flash_info *flash_info = NULL;
struct spi_message m;
struct spi_transfer t;
unsigned char cmd_resp[6];
int i, err;
uint16_t id;
spi_message_init(&m);
memset(&t, 0, sizeof(struct spi_transfer));
cmd_resp[0] = SST25L_CMD_READ_ID;
cmd_resp[1] = 0;
cmd_resp[2] = 0;
cmd_resp[3] = 0;
cmd_resp[4] = 0xff;
cmd_resp[5] = 0xff;
t.tx_buf = cmd_resp;
t.rx_buf = cmd_resp;
t.len = sizeof(cmd_resp);
spi_message_add_tail(&t, &m);
err = spi_sync(spi, &m);
if (err < 0) {
dev_err(&spi->dev, "error reading device id\n");
return NULL;
}
id = (cmd_resp[4] << 8) | cmd_resp[5];
for (i = 0; i < ARRAY_SIZE(sst25l_flash_info); i++)
if (sst25l_flash_info[i].device_id == id)
flash_info = &sst25l_flash_info[i];
if (!flash_info)
dev_err(&spi->dev, "unknown id %.4x\n", id);
return flash_info;
}
static int sst25l_probe(struct spi_device *spi)
{
struct flash_info *flash_info;
struct sst25l_flash *flash;
struct flash_platform_data *data;
int ret;
flash_info = sst25l_match_device(spi);
if (!flash_info)
return -ENODEV;
flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
if (!flash)
return -ENOMEM;
flash->spi = spi;
mutex_init(&flash->lock);
spi_set_drvdata(spi, flash);
data = dev_get_platdata(&spi->dev);
if (data && data->name)
flash->mtd.name = data->name;
flash->mtd.dev.parent = &spi->dev;
flash->mtd.type = MTD_NORFLASH;
flash->mtd.flags = MTD_CAP_NORFLASH;
flash->mtd.erasesize = flash_info->erase_size;
flash->mtd.writesize = flash_info->page_size;
flash->mtd.writebufsize = flash_info->page_size;
flash->mtd.size = flash_info->page_size * flash_info->nr_pages;
flash->mtd._erase = sst25l_erase;
flash->mtd._read = sst25l_read;
flash->mtd._write = sst25l_write;
dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name,
(long long)flash->mtd.size >> 10);
pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) "
".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
flash->mtd.name,
(long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
flash->mtd.erasesize, flash->mtd.erasesize / 1024,
flash->mtd.numeraseregions);
ret = mtd_device_register(&flash->mtd, data ? data->parts : NULL,
data ? data->nr_parts : 0);
if (ret)
return -ENODEV;
return 0;
}
static void sst25l_remove(struct spi_device *spi)
{
struct sst25l_flash *flash = spi_get_drvdata(spi);
WARN_ON(mtd_device_unregister(&flash->mtd));
}
static struct spi_driver sst25l_driver = {
.driver = {
.name = "sst25l",
},
.probe = sst25l_probe,
.remove = sst25l_remove,
};
module_spi_driver(sst25l_driver);
MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips");
MODULE_AUTHOR("Andre Renaud <[email protected]>, "
"Ryan Mallon");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/devices/sst25l.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/mtd/spi-nor.h>
#include "core.h"
static const struct flash_info catalyst_nor_parts[] = {
/* Catalyst / On Semiconductor -- non-JEDEC */
{ "cat25c11", CAT25_INFO(16, 8, 16, 1) },
{ "cat25c03", CAT25_INFO(32, 8, 16, 2) },
{ "cat25c09", CAT25_INFO(128, 8, 32, 2) },
{ "cat25c17", CAT25_INFO(256, 8, 32, 2) },
{ "cat25128", CAT25_INFO(2048, 8, 64, 2) },
};
const struct spi_nor_manufacturer spi_nor_catalyst = {
.name = "catalyst",
.parts = catalyst_nor_parts,
.nparts = ARRAY_SIZE(catalyst_nor_parts),
};
| linux-master | drivers/mtd/spi-nor/catalyst.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/mtd/spi-nor.h>
#include "core.h"
static const struct flash_info intel_nor_parts[] = {
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
};
const struct spi_nor_manufacturer spi_nor_intel = {
.name = "intel",
.parts = intel_nor_parts,
.nparts = ARRAY_SIZE(intel_nor_parts),
};
| linux-master | drivers/mtd/spi-nor/intel.c |
// SPDX-License-Identifier: GPL-2.0
/*
* OTP support for SPI NOR flashes
*
* Copyright (C) 2021 Michael Walle <[email protected]>
*/
#include <linux/log2.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
#include "core.h"
#define spi_nor_otp_region_len(nor) ((nor)->params->otp.org->len)
#define spi_nor_otp_n_regions(nor) ((nor)->params->otp.org->n_regions)
/**
* spi_nor_otp_read_secr() - read security register
* @nor: pointer to 'struct spi_nor'
* @addr: offset to read from
* @len: number of bytes to read
* @buf: pointer to dst buffer
*
* Read a security register by using the SPINOR_OP_RSECR commands.
*
* In Winbond/GigaDevice datasheets the term "security register" stands for
* a one-time-programmable memory area, consisting of multiple bytes (usually
* 256). Thus one "security register" maps to one OTP region.
*
* This method is used on GigaDevice and Winbond flashes.
*
* Please note, the read must not span multiple registers.
*
* Return: number of bytes read successfully, -errno otherwise
*/
int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf)
{
u8 addr_nbytes, read_opcode, read_dummy;
struct spi_mem_dirmap_desc *rdesc;
enum spi_nor_protocol read_proto;
int ret;
read_opcode = nor->read_opcode;
addr_nbytes = nor->addr_nbytes;
read_dummy = nor->read_dummy;
read_proto = nor->read_proto;
rdesc = nor->dirmap.rdesc;
nor->read_opcode = SPINOR_OP_RSECR;
nor->read_dummy = 8;
nor->read_proto = SNOR_PROTO_1_1_1;
nor->dirmap.rdesc = NULL;
ret = spi_nor_read_data(nor, addr, len, buf);
nor->read_opcode = read_opcode;
nor->addr_nbytes = addr_nbytes;
nor->read_dummy = read_dummy;
nor->read_proto = read_proto;
nor->dirmap.rdesc = rdesc;
return ret;
}
/**
* spi_nor_otp_write_secr() - write security register
* @nor: pointer to 'struct spi_nor'
* @addr: offset to write to
* @len: number of bytes to write
* @buf: pointer to src buffer
*
* Write a security register by using the SPINOR_OP_PSECR commands.
*
* For more information on the term "security register", see the documentation
* of spi_nor_otp_read_secr().
*
* This method is used on GigaDevice and Winbond flashes.
*
* Please note, the write must not span multiple registers.
*
* Return: number of bytes written successfully, -errno otherwise
*/
int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len,
const u8 *buf)
{
enum spi_nor_protocol write_proto;
struct spi_mem_dirmap_desc *wdesc;
u8 addr_nbytes, program_opcode;
int ret, written;
program_opcode = nor->program_opcode;
addr_nbytes = nor->addr_nbytes;
write_proto = nor->write_proto;
wdesc = nor->dirmap.wdesc;
nor->program_opcode = SPINOR_OP_PSECR;
nor->write_proto = SNOR_PROTO_1_1_1;
nor->dirmap.wdesc = NULL;
/*
* We only support a write to a single page. For now all Winbond
* flashes only have one page per security register.
*/
ret = spi_nor_write_enable(nor);
if (ret)
goto out;
written = spi_nor_write_data(nor, addr, len, buf);
if (written < 0)
goto out;
ret = spi_nor_wait_till_ready(nor);
out:
nor->program_opcode = program_opcode;
nor->addr_nbytes = addr_nbytes;
nor->write_proto = write_proto;
nor->dirmap.wdesc = wdesc;
return ret ?: written;
}
/**
* spi_nor_otp_erase_secr() - erase a security register
* @nor: pointer to 'struct spi_nor'
* @addr: offset of the security register to be erased
*
* Erase a security register by using the SPINOR_OP_ESECR command.
*
* For more information on the term "security register", see the documentation
* of spi_nor_otp_read_secr().
*
* This method is used on GigaDevice and Winbond flashes.
*
* Return: 0 on success, -errno otherwise
*/
int spi_nor_otp_erase_secr(struct spi_nor *nor, loff_t addr)
{
u8 erase_opcode = nor->erase_opcode;
int ret;
ret = spi_nor_write_enable(nor);
if (ret)
return ret;
nor->erase_opcode = SPINOR_OP_ESECR;
ret = spi_nor_erase_sector(nor, addr);
nor->erase_opcode = erase_opcode;
if (ret)
return ret;
return spi_nor_wait_till_ready(nor);
}
static int spi_nor_otp_lock_bit_cr(unsigned int region)
{
static const int lock_bits[] = { SR2_LB1, SR2_LB2, SR2_LB3 };
if (region >= ARRAY_SIZE(lock_bits))
return -EINVAL;
return lock_bits[region];
}
/**
* spi_nor_otp_lock_sr2() - lock the OTP region
* @nor: pointer to 'struct spi_nor'
* @region: OTP region
*
* Lock the OTP region by writing the status register-2. This method is used on
* GigaDevice and Winbond flashes.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_otp_lock_sr2(struct spi_nor *nor, unsigned int region)
{
u8 *cr = nor->bouncebuf;
int ret, lock_bit;
lock_bit = spi_nor_otp_lock_bit_cr(region);
if (lock_bit < 0)
return lock_bit;
ret = spi_nor_read_cr(nor, cr);
if (ret)
return ret;
/* no need to write the register if region is already locked */
if (cr[0] & lock_bit)
return 0;
cr[0] |= lock_bit;
return spi_nor_write_16bit_cr_and_check(nor, cr[0]);
}
/**
* spi_nor_otp_is_locked_sr2() - get the OTP region lock status
* @nor: pointer to 'struct spi_nor'
* @region: OTP region
*
* Retrieve the OTP region lock bit by reading the status register-2. This
* method is used on GigaDevice and Winbond flashes.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_otp_is_locked_sr2(struct spi_nor *nor, unsigned int region)
{
u8 *cr = nor->bouncebuf;
int ret, lock_bit;
lock_bit = spi_nor_otp_lock_bit_cr(region);
if (lock_bit < 0)
return lock_bit;
ret = spi_nor_read_cr(nor, cr);
if (ret)
return ret;
return cr[0] & lock_bit;
}
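/*
 * Editor's illustrative sketch, not part of the original file: how a
 * hypothetical manufacturer driver might bundle the security-register
 * helpers above into a spi_nor_otp_ops and hook them up from a late_init
 * fixup.  The ops structure and its read/write/erase/lock/is_locked members
 * are taken from the callers later in this file; "example_nor_otp_ops" and
 * "example_nor_late_init" are invented names.  Guarded out so it cannot be
 * mistaken for in-tree code.
 */
#if 0
static const struct spi_nor_otp_ops example_nor_otp_ops = {
	.read		= spi_nor_otp_read_secr,
	.write		= spi_nor_otp_write_secr,
	.erase		= spi_nor_otp_erase_secr,
	.lock		= spi_nor_otp_lock_sr2,
	.is_locked	= spi_nor_otp_is_locked_sr2,
};

static int example_nor_late_init(struct spi_nor *nor)
{
	/*
	 * The OTP organization (region length, count, base address, stride)
	 * is expected to come from the flash_info entry, e.g. an OTP_INFO()
	 * annotation as used by the Winbond parts later in this document.
	 */
	nor->params->otp.ops = &example_nor_otp_ops;
	return 0;
}
#endif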
static loff_t spi_nor_otp_region_start(const struct spi_nor *nor, unsigned int region)
{
const struct spi_nor_otp_organization *org = nor->params->otp.org;
return org->base + region * org->offset;
}
static size_t spi_nor_otp_size(struct spi_nor *nor)
{
return spi_nor_otp_n_regions(nor) * spi_nor_otp_region_len(nor);
}
/* Translate the file offsets from and to OTP regions. */
static loff_t spi_nor_otp_region_to_offset(struct spi_nor *nor, unsigned int region)
{
return region * spi_nor_otp_region_len(nor);
}
static unsigned int spi_nor_otp_offset_to_region(struct spi_nor *nor, loff_t ofs)
{
return div64_u64(ofs, spi_nor_otp_region_len(nor));
}
static int spi_nor_mtd_otp_info(struct mtd_info *mtd, size_t len,
size_t *retlen, struct otp_info *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
const struct spi_nor_otp_ops *ops = nor->params->otp.ops;
unsigned int n_regions = spi_nor_otp_n_regions(nor);
unsigned int i;
int ret, locked;
if (len < n_regions * sizeof(*buf))
return -ENOSPC;
ret = spi_nor_prep_and_lock(nor);
if (ret)
return ret;
for (i = 0; i < n_regions; i++) {
buf->start = spi_nor_otp_region_to_offset(nor, i);
buf->length = spi_nor_otp_region_len(nor);
locked = ops->is_locked(nor, i);
if (locked < 0) {
ret = locked;
goto out;
}
buf->locked = !!locked;
buf++;
}
*retlen = n_regions * sizeof(*buf);
out:
spi_nor_unlock_and_unprep(nor);
return ret;
}
static int spi_nor_mtd_otp_range_is_locked(struct spi_nor *nor, loff_t ofs,
size_t len)
{
const struct spi_nor_otp_ops *ops = nor->params->otp.ops;
unsigned int region;
int locked;
/*
* If any of the affected OTP regions are locked the entire range is
* considered locked.
*/
for (region = spi_nor_otp_offset_to_region(nor, ofs);
region <= spi_nor_otp_offset_to_region(nor, ofs + len - 1);
region++) {
locked = ops->is_locked(nor, region);
/* take the branch it is locked or in case of an error */
if (locked)
return locked;
}
return 0;
}
static int spi_nor_mtd_otp_read_write(struct mtd_info *mtd, loff_t ofs,
size_t total_len, size_t *retlen,
const u8 *buf, bool is_write)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
const struct spi_nor_otp_ops *ops = nor->params->otp.ops;
const size_t rlen = spi_nor_otp_region_len(nor);
loff_t rstart, rofs;
unsigned int region;
size_t len;
int ret;
if (ofs < 0 || ofs >= spi_nor_otp_size(nor))
return 0;
/* don't access beyond the end */
total_len = min_t(size_t, total_len, spi_nor_otp_size(nor) - ofs);
if (!total_len)
return 0;
ret = spi_nor_prep_and_lock(nor);
if (ret)
return ret;
if (is_write) {
ret = spi_nor_mtd_otp_range_is_locked(nor, ofs, total_len);
if (ret < 0) {
goto out;
} else if (ret) {
ret = -EROFS;
goto out;
}
}
while (total_len) {
/*
* The OTP regions are mapped into a contiguous area starting
* at 0 as expected by the MTD layer. This will map the MTD
* file offsets to the address of an OTP region as used in the
* actual SPI commands.
*/
region = spi_nor_otp_offset_to_region(nor, ofs);
rstart = spi_nor_otp_region_start(nor, region);
/*
* The size of an OTP region is expected to be a power of two,
* thus we can just mask the lower bits and get the offset into
* a region.
*/
rofs = ofs & (rlen - 1);
/* don't access beyond one OTP region */
len = min_t(size_t, total_len, rlen - rofs);
if (is_write)
ret = ops->write(nor, rstart + rofs, len, buf);
else
ret = ops->read(nor, rstart + rofs, len, (u8 *)buf);
if (ret == 0)
ret = -EIO;
if (ret < 0)
goto out;
*retlen += ret;
ofs += ret;
buf += ret;
total_len -= ret;
}
ret = 0;
out:
spi_nor_unlock_and_unprep(nor);
return ret;
}
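/*
 * Editor's worked example with hypothetical numbers: assume three 256-byte
 * OTP regions, the first at flash address 0x1000 and spaced 0x1000 apart
 * (the same figures appear in the Winbond OTP_INFO() entries later in this
 * document).  An MTD-level access at file offset 0x180 then resolves as
 *
 *   region = 0x180 / 256         = 1
 *   rstart = 0x1000 + 1 * 0x1000 = 0x2000  (flash address of region 1)
 *   rofs   = 0x180 & (256 - 1)   = 0x80    (offset inside the region)
 *
 * so the security-register access is issued at 0x2000 + 0x80, and at most
 * 256 - 0x80 = 0x80 bytes are transferred before the loop above advances to
 * the next region.
 */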
static int spi_nor_mtd_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u8 *buf)
{
return spi_nor_mtd_otp_read_write(mtd, from, len, retlen, buf, false);
}
static int spi_nor_mtd_otp_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u8 *buf)
{
return spi_nor_mtd_otp_read_write(mtd, to, len, retlen, buf, true);
}
static int spi_nor_mtd_otp_erase(struct mtd_info *mtd, loff_t from, size_t len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
const struct spi_nor_otp_ops *ops = nor->params->otp.ops;
const size_t rlen = spi_nor_otp_region_len(nor);
unsigned int region;
loff_t rstart;
int ret;
/* OTP erase is optional */
if (!ops->erase)
return -EOPNOTSUPP;
if (!len)
return 0;
if (from < 0 || (from + len) > spi_nor_otp_size(nor))
return -EINVAL;
/* the user has to explicitly ask for whole regions */
if (!IS_ALIGNED(len, rlen) || !IS_ALIGNED(from, rlen))
return -EINVAL;
ret = spi_nor_prep_and_lock(nor);
if (ret)
return ret;
ret = spi_nor_mtd_otp_range_is_locked(nor, from, len);
if (ret < 0) {
goto out;
} else if (ret) {
ret = -EROFS;
goto out;
}
while (len) {
region = spi_nor_otp_offset_to_region(nor, from);
rstart = spi_nor_otp_region_start(nor, region);
ret = ops->erase(nor, rstart);
if (ret)
goto out;
len -= rlen;
from += rlen;
}
out:
spi_nor_unlock_and_unprep(nor);
return ret;
}
static int spi_nor_mtd_otp_lock(struct mtd_info *mtd, loff_t from, size_t len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
const struct spi_nor_otp_ops *ops = nor->params->otp.ops;
const size_t rlen = spi_nor_otp_region_len(nor);
unsigned int region;
int ret;
if (from < 0 || (from + len) > spi_nor_otp_size(nor))
return -EINVAL;
/* the user has to explicitly ask for whole regions */
if (!IS_ALIGNED(len, rlen) || !IS_ALIGNED(from, rlen))
return -EINVAL;
ret = spi_nor_prep_and_lock(nor);
if (ret)
return ret;
while (len) {
region = spi_nor_otp_offset_to_region(nor, from);
ret = ops->lock(nor, region);
if (ret)
goto out;
len -= rlen;
from += rlen;
}
out:
spi_nor_unlock_and_unprep(nor);
return ret;
}
void spi_nor_set_mtd_otp_ops(struct spi_nor *nor)
{
struct mtd_info *mtd = &nor->mtd;
if (!nor->params->otp.ops)
return;
if (WARN_ON(!is_power_of_2(spi_nor_otp_region_len(nor))))
return;
/*
* We only support user_prot callbacks for now.
*
* Some SPI NOR flashes like Macronix ones can be ordered in two
* different variants. One with a factory locked OTP area and one where
* it is left to the user to write to it. The factory locked OTP is
* usually preprogrammed with an "electrical serial number". We don't
* support these for now.
*/
mtd->_get_user_prot_info = spi_nor_mtd_otp_info;
mtd->_read_user_prot_reg = spi_nor_mtd_otp_read;
mtd->_write_user_prot_reg = spi_nor_mtd_otp_write;
mtd->_lock_user_prot_reg = spi_nor_mtd_otp_lock;
mtd->_erase_user_prot_reg = spi_nor_mtd_otp_erase;
}
| linux-master | drivers/mtd/spi-nor/otp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/mtd/spi-nor.h>
#include "core.h"
static int
gd25q256_post_bfpt(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
/*
* GD25Q256C supports the first version of JESD216 which does not define
* the Quad Enable methods. Overwrite the default Quad Enable method.
*
* GD25Q256 GENERATION | SFDP MAJOR VERSION | SFDP MINOR VERSION
* GD25Q256C | SFDP_JESD216_MAJOR | SFDP_JESD216_MINOR
* GD25Q256D | SFDP_JESD216_MAJOR | SFDP_JESD216B_MINOR
* GD25Q256E | SFDP_JESD216_MAJOR | SFDP_JESD216B_MINOR
*/
if (bfpt_header->major == SFDP_JESD216_MAJOR &&
bfpt_header->minor == SFDP_JESD216_MINOR)
nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
return 0;
}
static const struct spi_nor_fixups gd25q256_fixups = {
.post_bfpt = gd25q256_post_bfpt,
};
static const struct flash_info gigadevice_nor_parts[] = {
{ "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512)
PARSE_SFDP
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
.fixups = &gd25q256_fixups },
};
const struct spi_nor_manufacturer spi_nor_gigadevice = {
.name = "gigadevice",
.parts = gigadevice_nor_parts,
.nparts = ARRAY_SIZE(gigadevice_nor_parts),
};
| linux-master | drivers/mtd/spi-nor/gigadevice.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/mtd/spi-nor.h>
#include "core.h"
static int
mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
/*
* MX25L25635F supports 4B opcodes but MX25L25635E does not.
* Unfortunately, Macronix has re-used the same JEDEC ID for both
* variants which prevents us from defining a new entry in the parts
* table.
* We need a way to differentiate MX25L25635E and MX25L25635F, and it
* seems that the F version advertises support for Fast Read 4-4-4 in
* its BFPT table.
*/
if (bfpt->dwords[SFDP_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
nor->flags |= SNOR_F_4B_OPCODES;
return 0;
}
static const struct spi_nor_fixups mx25l25635_fixups = {
.post_bfpt = mx25l25635_post_bfpt_fixups,
};
static const struct flash_info macronix_nor_parts[] = {
/* Macronix */
{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16) },
{ "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP)
NO_SFDP_FLAGS(SECT_4K) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256) },
{ "mx25r1635f", INFO(0xc22815, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
.fixups = &mx25l25635_fixups },
{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "mx25u51245g", INFO(0xc2253a, 0, 64 * 1024, 1024)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "mx25uw51245g", INFOB(0xc2813a, 0, 0, 0, 4)
PARSE_SFDP
FLAGS(SPI_NOR_RWW) },
{ "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512) },
{ "mx66l51235f", INFO(0xc2201a, 0, 64 * 1024, 1024)
NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048)
NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
{ "mx66u2g45g", INFO(0xc2253c, 0, 64 * 1024, 4096)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
};
static void macronix_nor_default_init(struct spi_nor *nor)
{
nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
}
static int macronix_nor_late_init(struct spi_nor *nor)
{
if (!nor->params->set_4byte_addr_mode)
nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_en4b_ex4b;
return 0;
}
static const struct spi_nor_fixups macronix_nor_fixups = {
.default_init = macronix_nor_default_init,
.late_init = macronix_nor_late_init,
};
const struct spi_nor_manufacturer spi_nor_macronix = {
.name = "macronix",
.parts = macronix_nor_parts,
.nparts = ARRAY_SIZE(macronix_nor_parts),
.fixups = &macronix_nor_fixups,
};
| linux-master | drivers/mtd/spi-nor/macronix.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/mtd/spi-nor.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/sysfs.h>
#include "core.h"
static ssize_t manufacturer_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct spi_device *spi = to_spi_device(dev);
struct spi_mem *spimem = spi_get_drvdata(spi);
struct spi_nor *nor = spi_mem_get_drvdata(spimem);
return sysfs_emit(buf, "%s\n", nor->manufacturer->name);
}
static DEVICE_ATTR_RO(manufacturer);
static ssize_t partname_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct spi_device *spi = to_spi_device(dev);
struct spi_mem *spimem = spi_get_drvdata(spi);
struct spi_nor *nor = spi_mem_get_drvdata(spimem);
return sysfs_emit(buf, "%s\n", nor->info->name);
}
static DEVICE_ATTR_RO(partname);
static ssize_t jedec_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct spi_device *spi = to_spi_device(dev);
struct spi_mem *spimem = spi_get_drvdata(spi);
struct spi_nor *nor = spi_mem_get_drvdata(spimem);
const u8 *id = nor->info->id_len ? nor->info->id : nor->id;
u8 id_len = nor->info->id_len ?: SPI_NOR_MAX_ID_LEN;
return sysfs_emit(buf, "%*phN\n", id_len, id);
}
static DEVICE_ATTR_RO(jedec_id);
static struct attribute *spi_nor_sysfs_entries[] = {
&dev_attr_manufacturer.attr,
&dev_attr_partname.attr,
&dev_attr_jedec_id.attr,
NULL
};
static ssize_t sfdp_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct spi_device *spi = to_spi_device(kobj_to_dev(kobj));
struct spi_mem *spimem = spi_get_drvdata(spi);
struct spi_nor *nor = spi_mem_get_drvdata(spimem);
struct sfdp *sfdp = nor->sfdp;
size_t sfdp_size = sfdp->num_dwords * sizeof(*sfdp->dwords);
return memory_read_from_buffer(buf, count, &off, nor->sfdp->dwords,
sfdp_size);
}
static BIN_ATTR_RO(sfdp, 0);
static struct bin_attribute *spi_nor_sysfs_bin_entries[] = {
&bin_attr_sfdp,
NULL
};
static umode_t spi_nor_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct spi_device *spi = to_spi_device(kobj_to_dev(kobj));
struct spi_mem *spimem = spi_get_drvdata(spi);
struct spi_nor *nor = spi_mem_get_drvdata(spimem);
if (attr == &dev_attr_manufacturer.attr && !nor->manufacturer)
return 0;
if (attr == &dev_attr_jedec_id.attr && !nor->info->id_len && !nor->id)
return 0;
return 0444;
}
static umode_t spi_nor_sysfs_is_bin_visible(struct kobject *kobj,
struct bin_attribute *attr, int n)
{
struct spi_device *spi = to_spi_device(kobj_to_dev(kobj));
struct spi_mem *spimem = spi_get_drvdata(spi);
struct spi_nor *nor = spi_mem_get_drvdata(spimem);
if (attr == &bin_attr_sfdp && nor->sfdp)
return 0444;
return 0;
}
static const struct attribute_group spi_nor_sysfs_group = {
.name = "spi-nor",
.is_visible = spi_nor_sysfs_is_visible,
.is_bin_visible = spi_nor_sysfs_is_bin_visible,
.attrs = spi_nor_sysfs_entries,
.bin_attrs = spi_nor_sysfs_bin_entries,
};
const struct attribute_group *spi_nor_sysfs_groups[] = {
&spi_nor_sysfs_group,
NULL
};
| linux-master | drivers/mtd/spi-nor/sysfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/mtd/spi-nor.h>
#include "core.h"
static const struct flash_info fujitsu_nor_parts[] = {
/* Fujitsu */
{ "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1)
FLAGS(SPI_NOR_NO_ERASE) },
};
const struct spi_nor_manufacturer spi_nor_fujitsu = {
.name = "fujitsu",
.parts = fujitsu_nor_parts,
.nparts = ARRAY_SIZE(fujitsu_nor_parts),
};
| linux-master | drivers/mtd/spi-nor/fujitsu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SPI NOR Software Write Protection logic.
*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
#include "core.h"
static u8 spi_nor_get_sr_bp_mask(struct spi_nor *nor)
{
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6)
return mask | SR_BP3_BIT6;
if (nor->flags & SNOR_F_HAS_4BIT_BP)
return mask | SR_BP3;
return mask;
}
static u8 spi_nor_get_sr_tb_mask(struct spi_nor *nor)
{
if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
return SR_TB_BIT6;
else
return SR_TB_BIT5;
}
static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
{
unsigned int bp_slots, bp_slots_needed;
u8 mask = spi_nor_get_sr_bp_mask(nor);
/* Reserved one for "protect none" and one for "protect all". */
bp_slots = (1 << hweight8(mask)) - 2;
bp_slots_needed = ilog2(nor->info->n_sectors);
if (bp_slots_needed > bp_slots)
return nor->info->sector_size <<
(bp_slots_needed - bp_slots);
else
return nor->info->sector_size;
}
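/*
 * Editor's worked example for a hypothetical part: an 8 MiB flash made of
 * 128 sectors of 64 KiB, protected by the plain BP2..BP0 mask, gives
 *
 *   bp_slots        = (1 << hweight8(SR_BP2 | SR_BP1 | SR_BP0)) - 2 = 6
 *   bp_slots_needed = ilog2(128)                                    = 7
 *   min_prot_len    = 64 KiB << (7 - 6)                             = 128 KiB
 *
 * so the smallest lockable region is 128 KiB (1/64 of the array) and every
 * BP increment doubles it, which matches the Winbond w25q64fw table quoted
 * before spi_nor_sr_lock() below.
 */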
static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
uint64_t *len)
{
struct mtd_info *mtd = &nor->mtd;
u64 min_prot_len;
u8 mask = spi_nor_get_sr_bp_mask(nor);
u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
u8 bp, val = sr & mask;
if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3_BIT6)
val = (val & ~SR_BP3_BIT6) | SR_BP3;
bp = val >> SR_BP_SHIFT;
if (!bp) {
/* No protection */
*ofs = 0;
*len = 0;
return;
}
min_prot_len = spi_nor_get_min_prot_length_sr(nor);
*len = min_prot_len << (bp - 1);
if (*len > mtd->size)
*len = mtd->size;
if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
*ofs = 0;
else
*ofs = mtd->size - *len;
}
/*
* Return true if the entire region is locked (if @locked is true) or unlocked
* (if @locked is false); false otherwise.
*/
static bool spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
uint64_t len, u8 sr, bool locked)
{
loff_t lock_offs, lock_offs_max, offs_max;
uint64_t lock_len;
if (!len)
return true;
spi_nor_get_locked_range_sr(nor, sr, &lock_offs, &lock_len);
lock_offs_max = lock_offs + lock_len;
offs_max = ofs + len;
if (locked)
/* Requested range is a sub-range of locked range */
return (offs_max <= lock_offs_max) && (ofs >= lock_offs);
else
/* Requested range does not overlap with locked range */
return (ofs >= lock_offs_max) || (offs_max <= lock_offs);
}
static bool spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
u8 sr)
{
return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true);
}
static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs,
uint64_t len, u8 sr)
{
return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false);
}
/*
* Lock a region of the flash. Compatible with ST Micro and similar flash.
* Supports the block protection bits BP{0,1,2}/BP{0,1,2,3} in the status
* register
* (SR). Does not support these features found in newer SR bitfields:
* - SEC: sector/block protect - only handle SEC=0 (block protect)
* - CMP: complement protect - only support CMP=0 (range is not complemented)
*
* Support for the following is provided conditionally for some flash:
* - TB: top/bottom protect
*
* Sample table portion for 8MB flash (Winbond w25q64fw):
*
* SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
* --------------------------------------------------------------------------
* X | X | 0 | 0 | 0 | NONE | NONE
* 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
* 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
* 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
* 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
* 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
* 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
* X | X | 1 | 1 | 1 | 8 MB | ALL
* ------|-------|-------|-------|-------|---------------|-------------------
* 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
* 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
* 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
* 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
* 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
* 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
*
* Returns negative on errors, 0 on success.
*/
static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
struct mtd_info *mtd = &nor->mtd;
u64 min_prot_len;
int ret, status_old, status_new;
u8 mask = spi_nor_get_sr_bp_mask(nor);
u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
u8 pow, val;
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
ret = spi_nor_read_sr(nor, nor->bouncebuf);
if (ret)
return ret;
status_old = nor->bouncebuf[0];
/* If nothing in our range is unlocked, we don't need to do anything */
if (spi_nor_is_locked_sr(nor, ofs, len, status_old))
return 0;
/* If anything below us is unlocked, we can't use 'bottom' protection */
if (!spi_nor_is_locked_sr(nor, 0, ofs, status_old))
can_be_bottom = false;
/* If anything above us is unlocked, we can't use 'top' protection */
if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
status_old))
can_be_top = false;
if (!can_be_bottom && !can_be_top)
return -EINVAL;
/* Prefer top, if both are valid */
use_top = can_be_top;
/* lock_len: length of region that should end up locked */
if (use_top)
lock_len = mtd->size - ofs;
else
lock_len = ofs + len;
if (lock_len == mtd->size) {
val = mask;
} else {
min_prot_len = spi_nor_get_min_prot_length_sr(nor);
pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
val = pow << SR_BP_SHIFT;
if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
val = (val & ~SR_BP3) | SR_BP3_BIT6;
if (val & ~mask)
return -EINVAL;
/* Don't "lock" with no region! */
if (!(val & mask))
return -EINVAL;
}
status_new = (status_old & ~mask & ~tb_mask) | val;
/*
* Disallow further writes if WP# pin is neither left floating nor
* wrongly tied to GND (that includes internal pull-downs).
* WP# pin hard strapped to GND can be a valid use case.
*/
if (!(nor->flags & SNOR_F_NO_WP))
status_new |= SR_SRWD;
if (!use_top)
status_new |= tb_mask;
/* Don't bother if they're the same */
if (status_new == status_old)
return 0;
/* Only modify protection if it will not unlock other areas */
if ((status_new & mask) < (status_old & mask))
return -EINVAL;
return spi_nor_write_sr_and_check(nor, status_new);
}
/*
* Unlock a region of the flash. See spi_nor_sr_lock() for more info
*
* Returns negative on errors, 0 on success.
*/
static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
struct mtd_info *mtd = &nor->mtd;
u64 min_prot_len;
int ret, status_old, status_new;
u8 mask = spi_nor_get_sr_bp_mask(nor);
u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
u8 pow, val;
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
ret = spi_nor_read_sr(nor, nor->bouncebuf);
if (ret)
return ret;
status_old = nor->bouncebuf[0];
/* If nothing in our range is locked, we don't need to do anything */
if (spi_nor_is_unlocked_sr(nor, ofs, len, status_old))
return 0;
/* If anything below us is locked, we can't use 'top' protection */
if (!spi_nor_is_unlocked_sr(nor, 0, ofs, status_old))
can_be_top = false;
/* If anything above us is locked, we can't use 'bottom' protection */
if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
status_old))
can_be_bottom = false;
if (!can_be_bottom && !can_be_top)
return -EINVAL;
/* Prefer top, if both are valid */
use_top = can_be_top;
/* lock_len: length of region that should remain locked */
if (use_top)
lock_len = mtd->size - (ofs + len);
else
lock_len = ofs;
if (lock_len == 0) {
val = 0; /* fully unlocked */
} else {
min_prot_len = spi_nor_get_min_prot_length_sr(nor);
pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
val = pow << SR_BP_SHIFT;
if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
val = (val & ~SR_BP3) | SR_BP3_BIT6;
/* Some power-of-two sizes are not supported */
if (val & ~mask)
return -EINVAL;
}
status_new = (status_old & ~mask & ~tb_mask) | val;
/* Don't protect status register if we're fully unlocked */
if (lock_len == 0)
status_new &= ~SR_SRWD;
if (!use_top)
status_new |= tb_mask;
/* Don't bother if they're the same */
if (status_new == status_old)
return 0;
/* Only modify protection if it will not lock other areas */
if ((status_new & mask) > (status_old & mask))
return -EINVAL;
return spi_nor_write_sr_and_check(nor, status_new);
}
/*
* Check if a region of the flash is (completely) locked. See spi_nor_sr_lock()
* for more info.
*
* Returns 1 if entire region is locked, 0 if any portion is unlocked, and
* negative on errors.
*/
static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
int ret;
ret = spi_nor_read_sr(nor, nor->bouncebuf);
if (ret)
return ret;
return spi_nor_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
}
static const struct spi_nor_locking_ops spi_nor_sr_locking_ops = {
.lock = spi_nor_sr_lock,
.unlock = spi_nor_sr_unlock,
.is_locked = spi_nor_sr_is_locked,
};
void spi_nor_init_default_locking_ops(struct spi_nor *nor)
{
nor->params->locking_ops = &spi_nor_sr_locking_ops;
}
static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
ret = spi_nor_prep_and_lock(nor);
if (ret)
return ret;
ret = nor->params->locking_ops->lock(nor, ofs, len);
spi_nor_unlock_and_unprep(nor);
return ret;
}
static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
ret = spi_nor_prep_and_lock(nor);
if (ret)
return ret;
ret = nor->params->locking_ops->unlock(nor, ofs, len);
spi_nor_unlock_and_unprep(nor);
return ret;
}
static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
ret = spi_nor_prep_and_lock(nor);
if (ret)
return ret;
ret = nor->params->locking_ops->is_locked(nor, ofs, len);
spi_nor_unlock_and_unprep(nor);
return ret;
}
/**
* spi_nor_try_unlock_all() - Tries to unlock the entire flash memory array.
* @nor: pointer to a 'struct spi_nor'.
*
* Some SPI NOR flashes are write protected by default after a power-on reset
* cycle, in order to avoid inadvertent writes during power-up. Backward
* compatibility imposes to unlock the entire flash memory array at power-up
* by default.
*
* Unprotecting the entire flash array will fail for boards which are hardware
* write-protected. Thus any errors are ignored.
*/
void spi_nor_try_unlock_all(struct spi_nor *nor)
{
int ret;
if (!(nor->flags & SNOR_F_HAS_LOCK))
return;
dev_dbg(nor->dev, "Unprotecting entire flash array\n");
ret = spi_nor_unlock(&nor->mtd, 0, nor->params->size);
if (ret)
dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
}
void spi_nor_set_mtd_locking_ops(struct spi_nor *nor)
{
struct mtd_info *mtd = &nor->mtd;
if (!nor->params->locking_ops)
return;
mtd->_lock = spi_nor_lock;
mtd->_unlock = spi_nor_unlock;
mtd->_is_locked = spi_nor_is_locked;
}
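/*
 * Editor's illustrative sketch, not part of the original file: once
 * spi_nor_set_mtd_locking_ops() has populated _lock/_unlock/_is_locked,
 * userspace reaches these handlers through the MEMLOCK/MEMUNLOCK ioctls on
 * the mtd character device.  The device path, helper name and chip size
 * below are hypothetical; the block is guarded out so it cannot be mistaken
 * for driver code.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

static int example_unlock_whole_chip(const char *dev, __u32 chip_size)
{
	struct erase_info_user ei = { .start = 0, .length = chip_size };
	int fd, ret;

	fd = open(dev, O_RDWR);
	if (fd < 0)
		return -1;
	/* Ends up in spi_nor_unlock() via mtd->_unlock. */
	ret = ioctl(fd, MEMUNLOCK, &ei);
	close(fd);
	return ret;
}
#endif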
| linux-master | drivers/mtd/spi-nor/swp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/mtd/spi-nor.h>
#include "core.h"
/* SST flash_info mfr_flag. Used to specify SST byte programming. */
#define SST_WRITE BIT(0)
#define SST26VF_CR_BPNV BIT(3)
static int sst26vf_nor_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
return -EOPNOTSUPP;
}
static int sst26vf_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
int ret;
/* We only support unlocking the entire flash array. */
if (ofs != 0 || len != nor->params->size)
return -EINVAL;
ret = spi_nor_read_cr(nor, nor->bouncebuf);
if (ret)
return ret;
if (!(nor->bouncebuf[0] & SST26VF_CR_BPNV)) {
dev_dbg(nor->dev, "Any block has been permanently locked\n");
return -EINVAL;
}
return spi_nor_global_block_unlock(nor);
}
static int sst26vf_nor_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
return -EOPNOTSUPP;
}
static const struct spi_nor_locking_ops sst26vf_nor_locking_ops = {
.lock = sst26vf_nor_lock,
.unlock = sst26vf_nor_unlock,
.is_locked = sst26vf_nor_is_locked,
};
static int sst26vf_nor_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &sst26vf_nor_locking_ops;
return 0;
}
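/*
 * Editor's note with a minimal sketch: sst26vf_nor_unlock() above only
 * accepts the full-array range, so an in-kernel user has to ask for the
 * whole device, e.g.
 *
 *	err = mtd_unlock(mtd, 0, mtd->size);
 *
 * Any smaller range is rejected with -EINVAL, and the unlock is refused
 * altogether once a block has been permanently locked (SST26VF_CR_BPNV
 * cleared in the configuration register).
 */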
static const struct spi_nor_fixups sst26vf_nor_fixups = {
.late_init = sst26vf_nor_late_init,
};
static const struct flash_info sst_nor_parts[] = {
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP |
SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K) },
{ "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4)
FLAGS(SPI_NOR_HAS_LOCK)
NO_SFDP_FLAGS(SECT_4K) },
{ "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8)
FLAGS(SPI_NOR_HAS_LOCK)
NO_SFDP_FLAGS(SECT_4K) },
{ "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K)
MFR_FLAGS(SST_WRITE) },
{ "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "sst26vf032b", INFO(0xbf2642, 0, 0, 0)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
PARSE_SFDP
.fixups = &sst26vf_nor_fixups },
{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
.fixups = &sst26vf_nor_fixups },
};
static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
size_t actual = 0;
int ret;
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
ret = spi_nor_prep_and_lock(nor);
if (ret)
return ret;
ret = spi_nor_write_enable(nor);
if (ret)
goto out;
nor->sst_write_second = false;
/* Start write from odd address. */
if (to % 2) {
nor->program_opcode = SPINOR_OP_BP;
/* write one byte. */
ret = spi_nor_write_data(nor, to, 1, buf);
if (ret < 0)
goto out;
WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
goto out;
to++;
actual++;
}
/* Write out most of the data here. */
for (; actual < len - 1; actual += 2) {
nor->program_opcode = SPINOR_OP_AAI_WP;
/* write two bytes. */
ret = spi_nor_write_data(nor, to, 2, buf + actual);
if (ret < 0)
goto out;
WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
goto out;
to += 2;
nor->sst_write_second = true;
}
nor->sst_write_second = false;
ret = spi_nor_write_disable(nor);
if (ret)
goto out;
ret = spi_nor_wait_till_ready(nor);
if (ret)
goto out;
/* Write out trailing byte if it exists. */
if (actual != len) {
ret = spi_nor_write_enable(nor);
if (ret)
goto out;
nor->program_opcode = SPINOR_OP_BP;
ret = spi_nor_write_data(nor, to, 1, buf + actual);
if (ret < 0)
goto out;
WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
goto out;
actual += 1;
ret = spi_nor_write_disable(nor);
}
out:
*retlen += actual;
spi_nor_unlock_and_unprep(nor);
return ret;
}
static int sst_nor_late_init(struct spi_nor *nor)
{
if (nor->info->mfr_flags & SST_WRITE)
nor->mtd._write = sst_nor_write;
return 0;
}
static const struct spi_nor_fixups sst_nor_fixups = {
.late_init = sst_nor_late_init,
};
const struct spi_nor_manufacturer spi_nor_sst = {
.name = "sst",
.parts = sst_nor_parts,
.nparts = ARRAY_SIZE(sst_nor_parts),
.fixups = &sst_nor_fixups,
};
| linux-master | drivers/mtd/spi-nor/sst.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mtd/spi-nor.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include "core.h"
#define SPI_NOR_DEBUGFS_ROOT "spi-nor"
#define SNOR_F_NAME(name) [ilog2(SNOR_F_##name)] = #name
static const char *const snor_f_names[] = {
SNOR_F_NAME(HAS_SR_TB),
SNOR_F_NAME(NO_OP_CHIP_ERASE),
SNOR_F_NAME(BROKEN_RESET),
SNOR_F_NAME(4B_OPCODES),
SNOR_F_NAME(HAS_4BAIT),
SNOR_F_NAME(HAS_LOCK),
SNOR_F_NAME(HAS_16BIT_SR),
SNOR_F_NAME(NO_READ_CR),
SNOR_F_NAME(HAS_SR_TB_BIT6),
SNOR_F_NAME(HAS_4BIT_BP),
SNOR_F_NAME(HAS_SR_BP3_BIT6),
SNOR_F_NAME(IO_MODE_EN_VOLATILE),
SNOR_F_NAME(SOFT_RESET),
SNOR_F_NAME(SWP_IS_VOLATILE),
SNOR_F_NAME(RWW),
SNOR_F_NAME(ECC),
SNOR_F_NAME(NO_WP),
};
#undef SNOR_F_NAME
static const char *spi_nor_protocol_name(enum spi_nor_protocol proto)
{
switch (proto) {
case SNOR_PROTO_1_1_1: return "1S-1S-1S";
case SNOR_PROTO_1_1_2: return "1S-1S-2S";
case SNOR_PROTO_1_1_4: return "1S-1S-4S";
case SNOR_PROTO_1_1_8: return "1S-1S-8S";
case SNOR_PROTO_1_2_2: return "1S-2S-2S";
case SNOR_PROTO_1_4_4: return "1S-4S-4S";
case SNOR_PROTO_1_8_8: return "1S-8S-8S";
case SNOR_PROTO_2_2_2: return "2S-2S-2S";
case SNOR_PROTO_4_4_4: return "4S-4S-4S";
case SNOR_PROTO_8_8_8: return "8S-8S-8S";
case SNOR_PROTO_1_1_1_DTR: return "1D-1D-1D";
case SNOR_PROTO_1_2_2_DTR: return "1D-2D-2D";
case SNOR_PROTO_1_4_4_DTR: return "1D-4D-4D";
case SNOR_PROTO_1_8_8_DTR: return "1D-8D-8D";
case SNOR_PROTO_8_8_8_DTR: return "8D-8D-8D";
}
return "<unknown>";
}
static void spi_nor_print_flags(struct seq_file *s, unsigned long flags,
const char *const *names, int names_len)
{
bool sep = false;
int i;
for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
if (!(flags & BIT(i)))
continue;
if (sep)
seq_puts(s, " | ");
sep = true;
if (i < names_len && names[i])
seq_puts(s, names[i]);
else
seq_printf(s, "1<<%d", i);
}
}
static int spi_nor_params_show(struct seq_file *s, void *data)
{
struct spi_nor *nor = s->private;
struct spi_nor_flash_parameter *params = nor->params;
struct spi_nor_erase_map *erase_map = &params->erase_map;
struct spi_nor_erase_region *region;
const struct flash_info *info = nor->info;
char buf[16], *str;
int i;
seq_printf(s, "name\t\t%s\n", info->name);
seq_printf(s, "id\t\t%*ph\n", SPI_NOR_MAX_ID_LEN, nor->id);
string_get_size(params->size, 1, STRING_UNITS_2, buf, sizeof(buf));
seq_printf(s, "size\t\t%s\n", buf);
seq_printf(s, "write size\t%u\n", params->writesize);
seq_printf(s, "page size\t%u\n", params->page_size);
seq_printf(s, "address nbytes\t%u\n", nor->addr_nbytes);
seq_puts(s, "flags\t\t");
spi_nor_print_flags(s, nor->flags, snor_f_names, sizeof(snor_f_names));
seq_puts(s, "\n");
seq_puts(s, "\nopcodes\n");
seq_printf(s, " read\t\t0x%02x\n", nor->read_opcode);
seq_printf(s, " dummy cycles\t%u\n", nor->read_dummy);
seq_printf(s, " erase\t\t0x%02x\n", nor->erase_opcode);
seq_printf(s, " program\t0x%02x\n", nor->program_opcode);
switch (nor->cmd_ext_type) {
case SPI_NOR_EXT_NONE:
str = "none";
break;
case SPI_NOR_EXT_REPEAT:
str = "repeat";
break;
case SPI_NOR_EXT_INVERT:
str = "invert";
break;
default:
str = "<unknown>";
break;
}
seq_printf(s, " 8D extension\t%s\n", str);
seq_puts(s, "\nprotocols\n");
seq_printf(s, " read\t\t%s\n",
spi_nor_protocol_name(nor->read_proto));
seq_printf(s, " write\t\t%s\n",
spi_nor_protocol_name(nor->write_proto));
seq_printf(s, " register\t%s\n",
spi_nor_protocol_name(nor->reg_proto));
seq_puts(s, "\nerase commands\n");
for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
struct spi_nor_erase_type *et = &erase_map->erase_type[i];
if (et->size) {
string_get_size(et->size, 1, STRING_UNITS_2, buf,
sizeof(buf));
seq_printf(s, " %02x (%s) [%d]\n", et->opcode, buf, i);
}
}
if (!(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
string_get_size(params->size, 1, STRING_UNITS_2, buf, sizeof(buf));
seq_printf(s, " %02x (%s)\n", SPINOR_OP_CHIP_ERASE, buf);
}
seq_puts(s, "\nsector map\n");
seq_puts(s, " region (in hex) | erase mask | flags\n");
seq_puts(s, " ------------------+------------+----------\n");
for (region = erase_map->regions;
region;
region = spi_nor_region_next(region)) {
u64 start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
u64 flags = region->offset & SNOR_ERASE_FLAGS_MASK;
u64 end = start + region->size - 1;
seq_printf(s, " %08llx-%08llx | [%c%c%c%c] | %s\n",
start, end,
flags & BIT(0) ? '0' : ' ',
flags & BIT(1) ? '1' : ' ',
flags & BIT(2) ? '2' : ' ',
flags & BIT(3) ? '3' : ' ',
flags & SNOR_OVERLAID_REGION ? "overlaid" : "");
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(spi_nor_params);
static void spi_nor_print_read_cmd(struct seq_file *s, u32 cap,
struct spi_nor_read_command *cmd)
{
seq_printf(s, " %s%s\n", spi_nor_protocol_name(cmd->proto),
cap == SNOR_HWCAPS_READ_FAST ? " (fast read)" : "");
seq_printf(s, " opcode\t0x%02x\n", cmd->opcode);
seq_printf(s, " mode cycles\t%u\n", cmd->num_mode_clocks);
seq_printf(s, " dummy cycles\t%u\n", cmd->num_wait_states);
}
static void spi_nor_print_pp_cmd(struct seq_file *s,
struct spi_nor_pp_command *cmd)
{
seq_printf(s, " %s\n", spi_nor_protocol_name(cmd->proto));
seq_printf(s, " opcode\t0x%02x\n", cmd->opcode);
}
static int spi_nor_capabilities_show(struct seq_file *s, void *data)
{
struct spi_nor *nor = s->private;
struct spi_nor_flash_parameter *params = nor->params;
u32 hwcaps = params->hwcaps.mask;
int i, cmd;
seq_puts(s, "Supported read modes by the flash\n");
for (i = 0; i < sizeof(hwcaps) * BITS_PER_BYTE; i++) {
if (!(hwcaps & BIT(i)))
continue;
cmd = spi_nor_hwcaps_read2cmd(BIT(i));
if (cmd < 0)
continue;
spi_nor_print_read_cmd(s, BIT(i), &params->reads[cmd]);
hwcaps &= ~BIT(i);
}
seq_puts(s, "\nSupported page program modes by the flash\n");
for (i = 0; i < sizeof(hwcaps) * BITS_PER_BYTE; i++) {
if (!(hwcaps & BIT(i)))
continue;
cmd = spi_nor_hwcaps_pp2cmd(BIT(i));
if (cmd < 0)
continue;
spi_nor_print_pp_cmd(s, &params->page_programs[cmd]);
hwcaps &= ~BIT(i);
}
if (hwcaps)
seq_printf(s, "\nunknown hwcaps 0x%x\n", hwcaps);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(spi_nor_capabilities);
static void spi_nor_debugfs_unregister(void *data)
{
struct spi_nor *nor = data;
debugfs_remove(nor->debugfs_root);
nor->debugfs_root = NULL;
}
static struct dentry *rootdir;
void spi_nor_debugfs_register(struct spi_nor *nor)
{
struct dentry *d;
int ret;
if (!rootdir)
rootdir = debugfs_create_dir(SPI_NOR_DEBUGFS_ROOT, NULL);
ret = devm_add_action(nor->dev, spi_nor_debugfs_unregister, nor);
if (ret)
return;
d = debugfs_create_dir(dev_name(nor->dev), rootdir);
nor->debugfs_root = d;
debugfs_create_file("params", 0444, d, nor, &spi_nor_params_fops);
debugfs_create_file("capabilities", 0444, d, nor,
&spi_nor_capabilities_fops);
}
void spi_nor_debugfs_shutdown(void)
{
debugfs_remove(rootdir);
}
| linux-master | drivers/mtd/spi-nor/debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/mtd/spi-nor.h>
#include "core.h"
#define WINBOND_NOR_OP_RDEAR 0xc8 /* Read Extended Address Register */
#define WINBOND_NOR_OP_WREAR 0xc5 /* Write Extended Address Register */
#define WINBOND_NOR_WREAR_OP(buf) \
SPI_MEM_OP(SPI_MEM_OP_CMD(WINBOND_NOR_OP_WREAR, 0), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(1, buf, 0))
static int
w25q256_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
/*
* W25Q256JV supports 4B opcodes but W25Q256FV does not.
* Unfortunately, Winbond has re-used the same JEDEC ID for both
* variants which prevents us from defining a new entry in the parts
* table.
* To differentiate between W25Q256JV and W25Q256FV check SFDP header
* version: only JV has JESD216A compliant structure (version 5).
*/
if (bfpt_header->major == SFDP_JESD216_MAJOR &&
bfpt_header->minor == SFDP_JESD216A_MINOR)
nor->flags |= SNOR_F_4B_OPCODES;
return 0;
}
static const struct spi_nor_fixups w25q256_fixups = {
.post_bfpt = w25q256_post_bfpt_fixups,
};
static const struct flash_info winbond_nor_parts[] = {
/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
{ "w25x05", INFO(0xef3010, 0, 64 * 1024, 1)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25x10", INFO(0xef3011, 0, 64 * 1024, 2)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25x20", INFO(0xef3012, 0, 64 * 1024, 4)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
OTP_INFO(256, 3, 0x1000, 0x1000) },
{ "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
OTP_INFO(256, 3, 0x1000, 0x1000) },
{ "w25q64jwm", INFO(0xef8017, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25q128jwm", INFO(0xef8018, 0, 64 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25q256jwm", INFO(0xef8019, 0, 64 * 1024, 512)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q128", INFO(0xef4018, 0, 0, 0)
PARSE_SFDP
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
.fixups = &w25q256_fixups },
{ "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512)
PARSE_SFDP },
{ "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ |
SPI_NOR_DUAL_READ) },
{ "w25q512nwq", INFO(0xef6020, 0, 0, 0)
PARSE_SFDP
OTP_INFO(256, 3, 0x1000, 0x1000) },
{ "w25q512nwm", INFO(0xef8020, 0, 64 * 1024, 1024)
PARSE_SFDP
OTP_INFO(256, 3, 0x1000, 0x1000) },
{ "w25q512jvq", INFO(0xef4020, 0, 64 * 1024, 1024)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
};
/**
* winbond_nor_write_ear() - Write Extended Address Register.
* @nor: pointer to 'struct spi_nor'.
* @ear: value to write to the Extended Address Register.
*
* Return: 0 on success, -errno otherwise.
*/
static int winbond_nor_write_ear(struct spi_nor *nor, u8 ear)
{
int ret;
nor->bouncebuf[0] = ear;
if (nor->spimem) {
struct spi_mem_op op = WINBOND_NOR_WREAR_OP(nor->bouncebuf);
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = spi_nor_controller_ops_write_reg(nor,
WINBOND_NOR_OP_WREAR,
nor->bouncebuf, 1);
}
if (ret)
dev_dbg(nor->dev, "error %d writing EAR\n", ret);
return ret;
}
/**
* winbond_nor_set_4byte_addr_mode() - Set 4-byte address mode for Winbond
* flashes.
* @nor: pointer to 'struct spi_nor'.
* @enable: true to enter the 4-byte address mode, false to exit the 4-byte
* address mode.
*
* Return: 0 on success, -errno otherwise.
*/
static int winbond_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
int ret;
ret = spi_nor_set_4byte_addr_mode_en4b_ex4b(nor, enable);
if (ret || enable)
return ret;
/*
* On Winbond W25Q256FV, leaving 4byte mode causes the Extended Address
* Register to be set to 1, so all 3-byte-address reads come from the
* second 16M. We must clear the register to enable normal behavior.
*/
ret = spi_nor_write_enable(nor);
if (ret)
return ret;
ret = winbond_nor_write_ear(nor, 0);
if (ret)
return ret;
return spi_nor_write_disable(nor);
}
static const struct spi_nor_otp_ops winbond_nor_otp_ops = {
.read = spi_nor_otp_read_secr,
.write = spi_nor_otp_write_secr,
.erase = spi_nor_otp_erase_secr,
.lock = spi_nor_otp_lock_sr2,
.is_locked = spi_nor_otp_is_locked_sr2,
};
static int winbond_nor_late_init(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
if (params->otp.org->n_regions)
params->otp.ops = &winbond_nor_otp_ops;
/*
* Winbond seems to require that the Extended Address Register be set
* to zero when exiting the 4-Byte Address Mode, at least for W25Q256FV.
* This requirement is not described in the JESD216 SFDP standard, thus
* it is Winbond specific. Since we do not know if other Winbond flashes
* have the same requirement, play safe and overwrite the method parsed
* from BFPT, if any.
*/
params->set_4byte_addr_mode = winbond_nor_set_4byte_addr_mode;
return 0;
}
static const struct spi_nor_fixups winbond_nor_fixups = {
.late_init = winbond_nor_late_init,
};
const struct spi_nor_manufacturer spi_nor_winbond = {
.name = "winbond",
.parts = winbond_nor_parts,
.nparts = ARRAY_SIZE(winbond_nor_parts),
.fixups = &winbond_nor_fixups,
};
| linux-master | drivers/mtd/spi-nor/winbond.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Based on m25p80.c, by Mike Lavender ([email protected]), with
* influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spi/flash.h>
#include "core.h"
/* Define max times to check status register before we give up. */
/*
* For everything but full-chip erase; probably could be much smaller, but kept
* around for safety for now
*/
#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
/*
* For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
* for larger flash
*/
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
#define SPI_NOR_MAX_ADDR_NBYTES 4
#define SPI_NOR_SRST_SLEEP_MIN 200
#define SPI_NOR_SRST_SLEEP_MAX 400
/**
* spi_nor_get_cmd_ext() - Get the command opcode extension based on the
* extension type.
* @nor: pointer to a 'struct spi_nor'
* @op: pointer to the 'struct spi_mem_op' whose properties
* need to be initialized.
*
* Right now, only "repeat" and "invert" are supported.
*
* Return: The opcode extension.
*/
static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
const struct spi_mem_op *op)
{
switch (nor->cmd_ext_type) {
case SPI_NOR_EXT_INVERT:
return ~op->cmd.opcode;
case SPI_NOR_EXT_REPEAT:
return op->cmd.opcode;
default:
dev_err(nor->dev, "Unknown command extension type\n");
return 0;
}
}
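/*
 * Illustrative sketch, not part of the upstream driver: the two opcode
 * extension schemes handled by spi_nor_get_cmd_ext() above. With "invert" the
 * extension byte is the bitwise complement of the opcode, with "repeat" it is
 * the opcode itself. The function name and the opcode value are made up for
 * the example.
 */
static void __maybe_unused spi_nor_cmd_ext_example(void)
{
	u8 opcode = 0x0b;				/* e.g. a Fast Read opcode */
	u8 ext_invert = (u8)~opcode;			/* 0xf4 */
	u8 ext_repeat = opcode;				/* 0x0b */
	u16 dtr_cmd_invert = (opcode << 8) | ext_invert;	/* 0x0bf4 on the bus */
	u16 dtr_cmd_repeat = (opcode << 8) | ext_repeat;	/* 0x0b0b on the bus */

	(void)dtr_cmd_invert;
	(void)dtr_cmd_repeat;
}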
/**
* spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
* @nor: pointer to a 'struct spi_nor'
* @op: pointer to the 'struct spi_mem_op' whose properties
* need to be initialized.
* @proto: the protocol from which the properties need to be set.
*/
void spi_nor_spimem_setup_op(const struct spi_nor *nor,
struct spi_mem_op *op,
const enum spi_nor_protocol proto)
{
u8 ext;
op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);
if (op->addr.nbytes)
op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);
if (op->dummy.nbytes)
op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
if (op->data.nbytes)
op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);
if (spi_nor_protocol_is_dtr(proto)) {
/*
* SPIMEM supports mixed DTR modes, but right now we can only
* have all phases either DTR or STR. IOW, SPIMEM can have
* something like 4S-4D-4D, but SPI NOR can't. So, set all 4
* phases to either DTR or STR.
*/
op->cmd.dtr = true;
op->addr.dtr = true;
op->dummy.dtr = true;
op->data.dtr = true;
/* 2 bytes per clock cycle in DTR mode. */
op->dummy.nbytes *= 2;
ext = spi_nor_get_cmd_ext(nor, op);
op->cmd.opcode = (op->cmd.opcode << 8) | ext;
op->cmd.nbytes = 2;
}
}
/**
* spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
* transfer
* @nor: pointer to 'struct spi_nor'
* @op: pointer to 'struct spi_mem_op' template for transfer
*
* If we have to use the bounce buffer, the data field in @op will be updated.
*
* Return: true if the bounce buffer is needed, false if not
*/
static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
{
/* op->data.buf.in occupies the same memory as op->data.buf.out */
if (object_is_on_stack(op->data.buf.in) ||
!virt_addr_valid(op->data.buf.in)) {
if (op->data.nbytes > nor->bouncebuf_size)
op->data.nbytes = nor->bouncebuf_size;
op->data.buf.in = nor->bouncebuf;
return true;
}
return false;
}
/**
* spi_nor_spimem_exec_op() - execute a memory operation
* @nor: pointer to 'struct spi_nor'
* @op: pointer to 'struct spi_mem_op' template for transfer
*
* Return: 0 on success, -error otherwise.
*/
static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
{
int error;
error = spi_mem_adjust_op_size(nor->spimem, op);
if (error)
return error;
return spi_mem_exec_op(nor->spimem, op);
}
int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
u8 *buf, size_t len)
{
if (spi_nor_protocol_is_dtr(nor->reg_proto))
return -EOPNOTSUPP;
return nor->controller_ops->read_reg(nor, opcode, buf, len);
}
int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
const u8 *buf, size_t len)
{
if (spi_nor_protocol_is_dtr(nor->reg_proto))
return -EOPNOTSUPP;
return nor->controller_ops->write_reg(nor, opcode, buf, len);
}
static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
{
if (spi_nor_protocol_is_dtr(nor->reg_proto))
return -EOPNOTSUPP;
return nor->controller_ops->erase(nor, offs);
}
/**
* spi_nor_spimem_read_data() - read data from flash's memory region via
* spi-mem
* @nor: pointer to 'struct spi_nor'
* @from: offset to read from
* @len: number of bytes to read
* @buf: pointer to dst buffer
*
* Return: number of bytes read successfully, -errno otherwise
*/
static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
size_t len, u8 *buf)
{
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
SPI_MEM_OP_ADDR(nor->addr_nbytes, from, 0),
SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
SPI_MEM_OP_DATA_IN(len, buf, 0));
bool usebouncebuf;
ssize_t nbytes;
int error;
spi_nor_spimem_setup_op(nor, &op, nor->read_proto);
/* convert the dummy cycles to the number of bytes */
op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
if (spi_nor_protocol_is_dtr(nor->read_proto))
op.dummy.nbytes *= 2;
usebouncebuf = spi_nor_spimem_bounce(nor, &op);
if (nor->dirmap.rdesc) {
nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
op.data.nbytes, op.data.buf.in);
} else {
error = spi_nor_spimem_exec_op(nor, &op);
if (error)
return error;
nbytes = op.data.nbytes;
}
if (usebouncebuf && nbytes > 0)
memcpy(buf, op.data.buf.in, nbytes);
return nbytes;
}
/**
* spi_nor_read_data() - read data from flash memory
* @nor: pointer to 'struct spi_nor'
* @from: offset to read from
* @len: number of bytes to read
* @buf: pointer to dst buffer
*
* Return: number of bytes read successfully, -errno otherwise
*/
ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
{
if (nor->spimem)
return spi_nor_spimem_read_data(nor, from, len, buf);
return nor->controller_ops->read(nor, from, len, buf);
}
/**
* spi_nor_spimem_write_data() - write data to flash memory via
* spi-mem
* @nor: pointer to 'struct spi_nor'
* @to: offset to write to
* @len: number of bytes to write
* @buf: pointer to src buffer
*
* Return: number of bytes written successfully, -errno otherwise
*/
static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
size_t len, const u8 *buf)
{
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
SPI_MEM_OP_ADDR(nor->addr_nbytes, to, 0),
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(len, buf, 0));
ssize_t nbytes;
int error;
if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
op.addr.nbytes = 0;
spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
if (spi_nor_spimem_bounce(nor, &op))
memcpy(nor->bouncebuf, buf, op.data.nbytes);
if (nor->dirmap.wdesc) {
nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
op.data.nbytes, op.data.buf.out);
} else {
error = spi_nor_spimem_exec_op(nor, &op);
if (error)
return error;
nbytes = op.data.nbytes;
}
return nbytes;
}
/**
* spi_nor_write_data() - write data to flash memory
* @nor: pointer to 'struct spi_nor'
* @to: offset to write to
* @len: number of bytes to write
* @buf: pointer to src buffer
*
* Return: number of bytes written successfully, -errno otherwise
*/
ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
const u8 *buf)
{
if (nor->spimem)
return spi_nor_spimem_write_data(nor, to, len, buf);
return nor->controller_ops->write(nor, to, len, buf);
}
/**
* spi_nor_read_any_reg() - read any register from flash memory, nonvolatile or
* volatile.
* @nor: pointer to 'struct spi_nor'.
* @op: SPI memory operation. op->data.buf must be DMA-able.
* @proto: SPI protocol to use for the register operation.
*
* Return: zero on success, -errno otherwise
*/
int spi_nor_read_any_reg(struct spi_nor *nor, struct spi_mem_op *op,
enum spi_nor_protocol proto)
{
if (!nor->spimem)
return -EOPNOTSUPP;
spi_nor_spimem_setup_op(nor, op, proto);
return spi_nor_spimem_exec_op(nor, op);
}
/**
* spi_nor_write_any_volatile_reg() - write any volatile register to flash
* memory.
* @nor: pointer to 'struct spi_nor'
* @op: SPI memory operation. op->data.buf must be DMA-able.
* @proto: SPI protocol to use for the register operation.
*
* Writing volatile registers is instant according to some manufacturers
* (Cypress, Micron) and does not need any status polling.
*
* Return: zero on success, -errno otherwise
*/
int spi_nor_write_any_volatile_reg(struct spi_nor *nor, struct spi_mem_op *op,
enum spi_nor_protocol proto)
{
int ret;
if (!nor->spimem)
return -EOPNOTSUPP;
ret = spi_nor_write_enable(nor);
if (ret)
return ret;
spi_nor_spimem_setup_op(nor, op, proto);
return spi_nor_spimem_exec_op(nor, op);
}
/**
* spi_nor_write_enable() - Set write enable latch with Write Enable command.
* @nor: pointer to 'struct spi_nor'.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_write_enable(struct spi_nor *nor)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op = SPI_NOR_WREN_OP;
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
NULL, 0);
}
if (ret)
dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
return ret;
}
/**
* spi_nor_write_disable() - Send Write Disable instruction to the chip.
* @nor: pointer to 'struct spi_nor'.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_write_disable(struct spi_nor *nor)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op = SPI_NOR_WRDI_OP;
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
NULL, 0);
}
if (ret)
dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
return ret;
}
/**
* spi_nor_read_id() - Read the JEDEC ID.
* @nor: pointer to 'struct spi_nor'.
* @naddr: number of address bytes to send. Can be zero if the operation
* does not need to send an address.
* @ndummy: number of dummy bytes to send after an opcode or address. Can
* be zero if the operation does not require dummy bytes.
* @id: pointer to a DMA-able buffer where the value of the JEDEC ID
* will be written.
* @proto: the SPI protocol for register operation.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_read_id(struct spi_nor *nor, u8 naddr, u8 ndummy, u8 *id,
enum spi_nor_protocol proto)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op =
SPI_NOR_READID_OP(naddr, ndummy, id, SPI_NOR_MAX_ID_LEN);
spi_nor_spimem_setup_op(nor, &op, proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
SPI_NOR_MAX_ID_LEN);
}
return ret;
}
/**
* spi_nor_read_sr() - Read the Status Register.
* @nor: pointer to 'struct spi_nor'.
* @sr: pointer to a DMA-able buffer where the value of the
* Status Register will be written. Should be at least 2 bytes.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op = SPI_NOR_RDSR_OP(sr);
if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
op.addr.nbytes = nor->params->rdsr_addr_nbytes;
op.dummy.nbytes = nor->params->rdsr_dummy;
/*
* We don't want to read only one byte in DTR mode. So,
* read 2 and then discard the second byte.
*/
op.data.nbytes = 2;
}
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
1);
}
if (ret)
dev_dbg(nor->dev, "error %d reading SR\n", ret);
return ret;
}
/**
* spi_nor_read_cr() - Read the Configuration Register using the
* SPINOR_OP_RDCR (35h) command.
* @nor: pointer to 'struct spi_nor'
* @cr: pointer to a DMA-able buffer where the value of the
* Configuration Register will be written.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op = SPI_NOR_RDCR_OP(cr);
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
1);
}
if (ret)
dev_dbg(nor->dev, "error %d reading CR\n", ret);
return ret;
}
/**
* spi_nor_set_4byte_addr_mode_en4b_ex4b() - Enter/Exit 4-byte address mode
* using SPINOR_OP_EN4B/SPINOR_OP_EX4B. Typically used by
* Winbond and Macronix.
* @nor: pointer to 'struct spi_nor'.
* @enable: true to enter the 4-byte address mode, false to exit the 4-byte
* address mode.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_set_4byte_addr_mode_en4b_ex4b(struct spi_nor *nor, bool enable)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op = SPI_NOR_EN4B_EX4B_OP(enable);
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = spi_nor_controller_ops_write_reg(nor,
enable ? SPINOR_OP_EN4B :
SPINOR_OP_EX4B,
NULL, 0);
}
if (ret)
dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
return ret;
}
/**
* spi_nor_set_4byte_addr_mode_wren_en4b_ex4b() - Set 4-byte address mode using
* SPINOR_OP_WREN followed by SPINOR_OP_EN4B or SPINOR_OP_EX4B. Typically used
* by ST and Micron flashes.
* @nor: pointer to 'struct spi_nor'.
* @enable: true to enter the 4-byte address mode, false to exit the 4-byte
* address mode.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_set_4byte_addr_mode_wren_en4b_ex4b(struct spi_nor *nor, bool enable)
{
int ret;
ret = spi_nor_write_enable(nor);
if (ret)
return ret;
ret = spi_nor_set_4byte_addr_mode_en4b_ex4b(nor, enable);
if (ret)
return ret;
return spi_nor_write_disable(nor);
}
/**
* spi_nor_set_4byte_addr_mode_brwr() - Set 4-byte address mode using
* SPINOR_OP_BRWR. Typically used by Spansion flashes.
* @nor: pointer to 'struct spi_nor'.
* @enable: true to enter the 4-byte address mode, false to exit the 4-byte
* address mode.
*
* 8-bit volatile bank register used to define A[30:24] bits. MSB (bit[7]) is
* used to enable/disable 4-byte address mode. When MSB is set to ‘1’, 4-byte
* address mode is active and A[30:24] bits are don’t care. Write instruction is
* SPINOR_OP_BRWR(17h) with 1 byte of data.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_set_4byte_addr_mode_brwr(struct spi_nor *nor, bool enable)
{
int ret;
nor->bouncebuf[0] = enable << 7;
if (nor->spimem) {
struct spi_mem_op op = SPI_NOR_BRWR_OP(nor->bouncebuf);
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
nor->bouncebuf, 1);
}
if (ret)
dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
return ret;
}
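/*
 * Illustrative sketch, not part of the upstream driver: the single data byte
 * written by spi_nor_set_4byte_addr_mode_brwr() above. Bit 7 of the Bank
 * Register selects 4-byte address mode; bits [6:0] would otherwise hold
 * A[30:24]. The function name is made up for the example.
 */
static void __maybe_unused spi_nor_brwr_byte_example(void)
{
	u8 enter_4byte = 1 << 7;	/* 0x80: MSB set, 4-byte mode active */
	u8 exit_4byte = 0 << 7;		/* 0x00: MSB clear, back to 3-byte addressing */

	(void)enter_4byte;
	(void)exit_4byte;
}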
/**
* spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
* for new commands.
* @nor: pointer to 'struct spi_nor'.
*
* Return: 1 if ready, 0 if not ready, -errno on errors.
*/
int spi_nor_sr_ready(struct spi_nor *nor)
{
int ret;
ret = spi_nor_read_sr(nor, nor->bouncebuf);
if (ret)
return ret;
return !(nor->bouncebuf[0] & SR_WIP);
}
/**
* spi_nor_use_parallel_locking() - Checks if RWW locking scheme shall be used
* @nor: pointer to 'struct spi_nor'.
*
* Return: true if parallel locking is enabled, false otherwise.
*/
static bool spi_nor_use_parallel_locking(struct spi_nor *nor)
{
return nor->flags & SNOR_F_RWW;
}
/* Locking helpers for status read operations */
static int spi_nor_rww_start_rdst(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
int ret = -EAGAIN;
mutex_lock(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd)
goto busy;
rww->ongoing_io = true;
rww->ongoing_rd = true;
ret = 0;
busy:
mutex_unlock(&nor->lock);
return ret;
}
static void spi_nor_rww_end_rdst(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
mutex_lock(&nor->lock);
rww->ongoing_io = false;
rww->ongoing_rd = false;
mutex_unlock(&nor->lock);
}
static int spi_nor_lock_rdst(struct spi_nor *nor)
{
if (spi_nor_use_parallel_locking(nor))
return spi_nor_rww_start_rdst(nor);
return 0;
}
static void spi_nor_unlock_rdst(struct spi_nor *nor)
{
if (spi_nor_use_parallel_locking(nor)) {
spi_nor_rww_end_rdst(nor);
wake_up(&nor->rww.wait);
}
}
/**
* spi_nor_ready() - Query the flash to see if it is ready for new commands.
* @nor: pointer to 'struct spi_nor'.
*
* Return: 1 if ready, 0 if not ready, -errno on errors.
*/
static int spi_nor_ready(struct spi_nor *nor)
{
int ret;
ret = spi_nor_lock_rdst(nor);
if (ret)
return 0;
/* Flashes might override the standard routine. */
if (nor->params->ready)
ret = nor->params->ready(nor);
else
ret = spi_nor_sr_ready(nor);
spi_nor_unlock_rdst(nor);
return ret;
}
/**
* spi_nor_wait_till_ready_with_timeout() - Service routine to read the
* Status Register until ready, or timeout occurs.
* @nor: pointer to "struct spi_nor".
* @timeout_jiffies: jiffies to wait until timeout.
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
unsigned long timeout_jiffies)
{
unsigned long deadline;
int timeout = 0, ret;
deadline = jiffies + timeout_jiffies;
while (!timeout) {
if (time_after_eq(jiffies, deadline))
timeout = 1;
ret = spi_nor_ready(nor);
if (ret < 0)
return ret;
if (ret)
return 0;
cond_resched();
}
dev_dbg(nor->dev, "flash operation timed out\n");
return -ETIMEDOUT;
}
/**
* spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
* flash to be ready, or timeout occurs.
* @nor: pointer to "struct spi_nor".
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_wait_till_ready(struct spi_nor *nor)
{
return spi_nor_wait_till_ready_with_timeout(nor,
DEFAULT_READY_WAIT_JIFFIES);
}
/**
* spi_nor_global_block_unlock() - Unlock Global Block Protection.
* @nor: pointer to 'struct spi_nor'.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_global_block_unlock(struct spi_nor *nor)
{
int ret;
ret = spi_nor_write_enable(nor);
if (ret)
return ret;
if (nor->spimem) {
struct spi_mem_op op = SPI_NOR_GBULK_OP;
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK,
NULL, 0);
}
if (ret) {
dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret);
return ret;
}
return spi_nor_wait_till_ready(nor);
}
/**
* spi_nor_write_sr() - Write the Status Register.
* @nor: pointer to 'struct spi_nor'.
* @sr: pointer to DMA-able buffer to write to the Status Register.
* @len: number of bytes to write to the Status Register.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
{
int ret;
ret = spi_nor_write_enable(nor);
if (ret)
return ret;
if (nor->spimem) {
struct spi_mem_op op = SPI_NOR_WRSR_OP(sr, len);
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr,
len);
}
if (ret) {
dev_dbg(nor->dev, "error %d writing SR\n", ret);
return ret;
}
return spi_nor_wait_till_ready(nor);
}
/**
* spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
* ensure that the byte written matches the received value.
* @nor: pointer to a 'struct spi_nor'.
* @sr1: byte value to be written to the Status Register.
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
{
int ret;
nor->bouncebuf[0] = sr1;
ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
if (ret)
return ret;
ret = spi_nor_read_sr(nor, nor->bouncebuf);
if (ret)
return ret;
if (nor->bouncebuf[0] != sr1) {
dev_dbg(nor->dev, "SR1: read back test failed\n");
return -EIO;
}
return 0;
}
/**
* spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
* Status Register 2 in one shot. Ensure that the byte written in the Status
* Register 1 matches the received value, and that the 16-bit Write did not
* affect what was already in the Status Register 2.
* @nor: pointer to a 'struct spi_nor'.
* @sr1: byte value to be written to the Status Register 1.
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
{
int ret;
u8 *sr_cr = nor->bouncebuf;
u8 cr_written;
/* Make sure we don't overwrite the contents of Status Register 2. */
if (!(nor->flags & SNOR_F_NO_READ_CR)) {
ret = spi_nor_read_cr(nor, &sr_cr[1]);
if (ret)
return ret;
} else if (spi_nor_get_protocol_width(nor->read_proto) == 4 &&
spi_nor_get_protocol_width(nor->write_proto) == 4 &&
nor->params->quad_enable) {
/*
* If the Status Register 2 Read command (35h) is not
* supported, we should at least be sure we don't
* change the value of the SR2 Quad Enable bit.
*
* When the Quad Enable method is set and the buswidth is 4, we
* can safely assume that the value of the QE bit is one, as a
* consequence of the nor->params->quad_enable() call.
*
* According to the JESD216 revB standard, BFPT DWORDS[15],
* bits 22:20, the 16-bit Write Status (01h) command is
* available just for the cases in which the QE bit is
* described in SR2 at BIT(1).
*/
sr_cr[1] = SR2_QUAD_EN_BIT1;
} else {
sr_cr[1] = 0;
}
sr_cr[0] = sr1;
ret = spi_nor_write_sr(nor, sr_cr, 2);
if (ret)
return ret;
ret = spi_nor_read_sr(nor, sr_cr);
if (ret)
return ret;
if (sr1 != sr_cr[0]) {
dev_dbg(nor->dev, "SR: Read back test failed\n");
return -EIO;
}
if (nor->flags & SNOR_F_NO_READ_CR)
return 0;
cr_written = sr_cr[1];
ret = spi_nor_read_cr(nor, &sr_cr[1]);
if (ret)
return ret;
if (cr_written != sr_cr[1]) {
dev_dbg(nor->dev, "CR: read back test failed\n");
return -EIO;
}
return 0;
}
/**
* spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
* Configuration Register in one shot. Ensure that the byte written in the
* Configuration Register matches the received value, and that the 16-bit Write
* did not affect what was already in the Status Register 1.
* @nor: pointer to a 'struct spi_nor'.
* @cr: byte value to be written to the Configuration Register.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
{
int ret;
u8 *sr_cr = nor->bouncebuf;
u8 sr_written;
/* Keep the current value of the Status Register 1. */
ret = spi_nor_read_sr(nor, sr_cr);
if (ret)
return ret;
sr_cr[1] = cr;
ret = spi_nor_write_sr(nor, sr_cr, 2);
if (ret)
return ret;
sr_written = sr_cr[0];
ret = spi_nor_read_sr(nor, sr_cr);
if (ret)
return ret;
if (sr_written != sr_cr[0]) {
dev_dbg(nor->dev, "SR: Read back test failed\n");
return -EIO;
}
if (nor->flags & SNOR_F_NO_READ_CR)
return 0;
ret = spi_nor_read_cr(nor, &sr_cr[1]);
if (ret)
return ret;
if (cr != sr_cr[1]) {
dev_dbg(nor->dev, "CR: read back test failed\n");
return -EIO;
}
return 0;
}
/**
* spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
* the byte written matches the received value without affecting other bits in
* Status Registers 1 and 2.
* @nor: pointer to a 'struct spi_nor'.
* @sr1: byte value to be written to the Status Register.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
{
if (nor->flags & SNOR_F_HAS_16BIT_SR)
return spi_nor_write_16bit_sr_and_check(nor, sr1);
return spi_nor_write_sr1_and_check(nor, sr1);
}
/**
* spi_nor_write_sr2() - Write the Status Register 2 using the
* SPINOR_OP_WRSR2 (3eh) command.
* @nor: pointer to 'struct spi_nor'.
* @sr2: pointer to DMA-able buffer to write to the Status Register 2.
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
{
int ret;
ret = spi_nor_write_enable(nor);
if (ret)
return ret;
if (nor->spimem) {
struct spi_mem_op op = SPI_NOR_WRSR2_OP(sr2);
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2,
sr2, 1);
}
if (ret) {
dev_dbg(nor->dev, "error %d writing SR2\n", ret);
return ret;
}
return spi_nor_wait_till_ready(nor);
}
/**
* spi_nor_read_sr2() - Read the Status Register 2 using the
* SPINOR_OP_RDSR2 (3fh) command.
* @nor: pointer to 'struct spi_nor'.
* @sr2: pointer to DMA-able buffer where the value of the
* Status Register 2 will be written.
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op = SPI_NOR_RDSR2_OP(sr2);
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2,
1);
}
if (ret)
dev_dbg(nor->dev, "error %d reading SR2\n", ret);
return ret;
}
/**
* spi_nor_erase_chip() - Erase the entire flash memory.
* @nor: pointer to 'struct spi_nor'.
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_erase_chip(struct spi_nor *nor)
{
int ret;
dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
if (nor->spimem) {
struct spi_mem_op op = SPI_NOR_CHIP_ERASE_OP;
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = spi_nor_controller_ops_write_reg(nor,
SPINOR_OP_CHIP_ERASE,
NULL, 0);
}
if (ret)
dev_dbg(nor->dev, "error %d erasing chip\n", ret);
return ret;
}
static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
{
size_t i;
for (i = 0; i < size; i++)
if (table[i][0] == opcode)
return table[i][1];
/* No conversion found, keep input op code. */
return opcode;
}
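/*
 * Illustrative sketch, not part of the upstream driver: how the table-based
 * opcode conversion above is used by the 3-byte to 4-byte helpers that follow.
 * The function name and the opcode pair are made up for the example (0x20 is
 * typically a 4 KiB erase, 0x21 its 4-byte address variant).
 */
static u8 __maybe_unused spi_nor_convert_opcode_example(void)
{
	static const u8 example_3to4[][2] = {
		{ 0x20, 0x21 },
	};

	/* 0x20 has an entry, so 0x21 is returned; 0xff would come back unchanged. */
	return spi_nor_convert_opcode(0x20, example_3to4,
				      ARRAY_SIZE(example_3to4));
}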
u8 spi_nor_convert_3to4_read(u8 opcode)
{
static const u8 spi_nor_3to4_read[][2] = {
{ SPINOR_OP_READ, SPINOR_OP_READ_4B },
{ SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
{ SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
{ SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
{ SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
{ SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
{ SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
{ SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
{ SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
{ SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
{ SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
};
return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
ARRAY_SIZE(spi_nor_3to4_read));
}
static u8 spi_nor_convert_3to4_program(u8 opcode)
{
static const u8 spi_nor_3to4_program[][2] = {
{ SPINOR_OP_PP, SPINOR_OP_PP_4B },
{ SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
{ SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
{ SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
{ SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
};
return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
ARRAY_SIZE(spi_nor_3to4_program));
}
static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
static const u8 spi_nor_3to4_erase[][2] = {
{ SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
{ SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
{ SPINOR_OP_SE, SPINOR_OP_SE_4B },
};
return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
ARRAY_SIZE(spi_nor_3to4_erase));
}
static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
{
return !!nor->params->erase_map.uniform_erase_type;
}
static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
{
nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
if (!spi_nor_has_uniform_erase(nor)) {
struct spi_nor_erase_map *map = &nor->params->erase_map;
struct spi_nor_erase_type *erase;
int i;
for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
erase = &map->erase_type[i];
erase->opcode =
spi_nor_convert_3to4_erase(erase->opcode);
}
}
}
static int spi_nor_prep(struct spi_nor *nor)
{
int ret = 0;
if (nor->controller_ops && nor->controller_ops->prepare)
ret = nor->controller_ops->prepare(nor);
return ret;
}
static void spi_nor_unprep(struct spi_nor *nor)
{
if (nor->controller_ops && nor->controller_ops->unprepare)
nor->controller_ops->unprepare(nor);
}
static void spi_nor_offset_to_banks(u64 bank_size, loff_t start, size_t len,
u8 *first, u8 *last)
{
/* This is currently safe, the number of banks being very small */
*first = DIV_ROUND_DOWN_ULL(start, bank_size);
*last = DIV_ROUND_DOWN_ULL(start + len - 1, bank_size);
}
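/*
 * Illustrative sketch, not part of the upstream driver: the bank arithmetic
 * performed by spi_nor_offset_to_banks() above. The function name and the
 * 16 MiB bank size are made up for the example.
 */
static void __maybe_unused spi_nor_offset_to_banks_example(void)
{
	u8 first, last;

	/* Last 4 KiB of bank 0 plus 4 KiB of bank 1: the range touches both. */
	spi_nor_offset_to_banks(SZ_16M, SZ_16M - SZ_4K, SZ_8K, &first, &last);
	/* first == 0, last == 1 */
}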
/* Generic helpers for internal locking and serialization */
static bool spi_nor_rww_start_io(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
bool start = false;
mutex_lock(&nor->lock);
if (rww->ongoing_io)
goto busy;
rww->ongoing_io = true;
start = true;
busy:
mutex_unlock(&nor->lock);
return start;
}
static void spi_nor_rww_end_io(struct spi_nor *nor)
{
mutex_lock(&nor->lock);
nor->rww.ongoing_io = false;
mutex_unlock(&nor->lock);
}
static int spi_nor_lock_device(struct spi_nor *nor)
{
if (!spi_nor_use_parallel_locking(nor))
return 0;
return wait_event_killable(nor->rww.wait, spi_nor_rww_start_io(nor));
}
static void spi_nor_unlock_device(struct spi_nor *nor)
{
if (spi_nor_use_parallel_locking(nor)) {
spi_nor_rww_end_io(nor);
wake_up(&nor->rww.wait);
}
}
/* Generic helpers for internal locking and serialization */
static bool spi_nor_rww_start_exclusive(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
bool start = false;
mutex_lock(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
goto busy;
rww->ongoing_io = true;
rww->ongoing_rd = true;
rww->ongoing_pe = true;
start = true;
busy:
mutex_unlock(&nor->lock);
return start;
}
static void spi_nor_rww_end_exclusive(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
mutex_lock(&nor->lock);
rww->ongoing_io = false;
rww->ongoing_rd = false;
rww->ongoing_pe = false;
mutex_unlock(&nor->lock);
}
int spi_nor_prep_and_lock(struct spi_nor *nor)
{
int ret;
ret = spi_nor_prep(nor);
if (ret)
return ret;
if (!spi_nor_use_parallel_locking(nor))
mutex_lock(&nor->lock);
else
ret = wait_event_killable(nor->rww.wait,
spi_nor_rww_start_exclusive(nor));
return ret;
}
void spi_nor_unlock_and_unprep(struct spi_nor *nor)
{
if (!spi_nor_use_parallel_locking(nor)) {
mutex_unlock(&nor->lock);
} else {
spi_nor_rww_end_exclusive(nor);
wake_up(&nor->rww.wait);
}
spi_nor_unprep(nor);
}
/* Internal locking helpers for program and erase operations */
static bool spi_nor_rww_start_pe(struct spi_nor *nor, loff_t start, size_t len)
{
struct spi_nor_rww *rww = &nor->rww;
unsigned int used_banks = 0;
bool started = false;
u8 first, last;
int bank;
mutex_lock(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
goto busy;
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++) {
if (rww->used_banks & BIT(bank))
goto busy;
used_banks |= BIT(bank);
}
rww->used_banks |= used_banks;
rww->ongoing_pe = true;
started = true;
busy:
mutex_unlock(&nor->lock);
return started;
}
static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
{
struct spi_nor_rww *rww = &nor->rww;
u8 first, last;
int bank;
mutex_lock(&nor->lock);
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++)
rww->used_banks &= ~BIT(bank);
rww->ongoing_pe = false;
mutex_unlock(&nor->lock);
}
static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t len)
{
int ret;
ret = spi_nor_prep(nor);
if (ret)
return ret;
if (!spi_nor_use_parallel_locking(nor))
mutex_lock(&nor->lock);
else
ret = wait_event_killable(nor->rww.wait,
spi_nor_rww_start_pe(nor, start, len));
return ret;
}
static void spi_nor_unlock_and_unprep_pe(struct spi_nor *nor, loff_t start, size_t len)
{
if (!spi_nor_use_parallel_locking(nor)) {
mutex_unlock(&nor->lock);
} else {
spi_nor_rww_end_pe(nor, start, len);
wake_up(&nor->rww.wait);
}
spi_nor_unprep(nor);
}
/* Internal locking helpers for read operations */
static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
{
struct spi_nor_rww *rww = &nor->rww;
unsigned int used_banks = 0;
bool started = false;
u8 first, last;
int bank;
mutex_lock(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd)
goto busy;
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++) {
if (rww->used_banks & BIT(bank))
goto busy;
used_banks |= BIT(bank);
}
rww->used_banks |= used_banks;
rww->ongoing_io = true;
rww->ongoing_rd = true;
started = true;
busy:
mutex_unlock(&nor->lock);
return started;
}
static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
{
struct spi_nor_rww *rww = &nor->rww;
u8 first, last;
int bank;
mutex_lock(&nor->lock);
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++)
nor->rww.used_banks &= ~BIT(bank);
rww->ongoing_io = false;
rww->ongoing_rd = false;
mutex_unlock(&nor->lock);
}
static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len)
{
int ret;
ret = spi_nor_prep(nor);
if (ret)
return ret;
if (!spi_nor_use_parallel_locking(nor))
mutex_lock(&nor->lock);
else
ret = wait_event_killable(nor->rww.wait,
spi_nor_rww_start_rd(nor, start, len));
return ret;
}
static void spi_nor_unlock_and_unprep_rd(struct spi_nor *nor, loff_t start, size_t len)
{
if (!spi_nor_use_parallel_locking(nor)) {
mutex_unlock(&nor->lock);
} else {
spi_nor_rww_end_rd(nor, start, len);
wake_up(&nor->rww.wait);
}
spi_nor_unprep(nor);
}
static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
{
if (!nor->params->convert_addr)
return addr;
return nor->params->convert_addr(nor, addr);
}
/*
* Initiate the erasure of a single sector
*/
int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
int i;
addr = spi_nor_convert_addr(nor, addr);
if (nor->spimem) {
struct spi_mem_op op =
SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode,
nor->addr_nbytes, addr);
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
return spi_mem_exec_op(nor->spimem, &op);
} else if (nor->controller_ops->erase) {
return spi_nor_controller_ops_erase(nor, addr);
}
/*
* Default implementation, if driver doesn't have a specialized HW
* control
*/
for (i = nor->addr_nbytes - 1; i >= 0; i--) {
nor->bouncebuf[i] = addr & 0xff;
addr >>= 8;
}
return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
nor->bouncebuf, nor->addr_nbytes);
}
/**
* spi_nor_div_by_erase_size() - calculate remainder and update new dividend
* @erase: pointer to a structure that describes a SPI NOR erase type
* @dividend: dividend value
* @remainder: pointer to u32 remainder (will be updated)
*
* Return: the result of the division
*/
static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
u64 dividend, u32 *remainder)
{
/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
*remainder = (u32)dividend & erase->size_mask;
return dividend >> erase->size_shift;
}
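/*
 * Illustrative sketch, not part of the upstream driver: because erase sizes
 * are powers of two, the division above reduces to a shift and the remainder
 * to a mask. The function name and the erase type values are made up for the
 * example.
 */
static void __maybe_unused spi_nor_div_by_erase_size_example(void)
{
	const struct spi_nor_erase_type erase = {
		.size = SZ_64K,
		.size_shift = 16,		/* ilog2(64 KiB) */
		.size_mask = SZ_64K - 1,
	};
	u32 rem;
	u64 nsectors = spi_nor_div_by_erase_size(&erase, 0x30200, &rem);

	/* nsectors == 3 full 64 KiB sectors, rem == 0x200 */
	(void)nsectors;
}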
/**
* spi_nor_find_best_erase_type() - find the best erase type for the given
* offset in the serial flash memory and the
* number of bytes to erase. The region in
* which the address fits is expected to be
* provided.
* @map: the erase map of the SPI NOR
* @region: pointer to a structure that describes a SPI NOR erase region
* @addr: offset in the serial flash memory
* @len: number of bytes to erase
*
* Return: a pointer to the best fitted erase type, NULL otherwise.
*/
static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
const struct spi_nor_erase_region *region,
u64 addr, u32 len)
{
const struct spi_nor_erase_type *erase;
u32 rem;
int i;
u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
/*
* Erase types are ordered by size, with the smallest erase type at
* index 0.
*/
for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
/* Does the erase region support the tested erase type? */
if (!(erase_mask & BIT(i)))
continue;
erase = &map->erase_type[i];
if (!erase->size)
continue;
/* Alignment is not mandatory for overlaid regions */
if (region->offset & SNOR_OVERLAID_REGION &&
region->size <= len)
return erase;
/* Don't erase more than what the user has asked for. */
if (erase->size > len)
continue;
spi_nor_div_by_erase_size(erase, addr, &rem);
if (!rem)
return erase;
}
return NULL;
}
static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
{
return region->offset & SNOR_LAST_REGION;
}
static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
{
return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
}
/**
* spi_nor_region_next() - get the next spi nor region
* @region: pointer to a structure that describes a SPI NOR erase region
*
* Return: the next spi nor region or NULL if last region.
*/
struct spi_nor_erase_region *
spi_nor_region_next(struct spi_nor_erase_region *region)
{
if (spi_nor_region_is_last(region))
return NULL;
region++;
return region;
}
/**
* spi_nor_find_erase_region() - find the region of the serial flash memory in
* which the offset fits
* @map: the erase map of the SPI NOR
* @addr: offset in the serial flash memory
*
* Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
* otherwise.
*/
static struct spi_nor_erase_region *
spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
{
struct spi_nor_erase_region *region = map->regions;
u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
u64 region_end = region_start + region->size;
while (addr < region_start || addr >= region_end) {
region = spi_nor_region_next(region);
if (!region)
return ERR_PTR(-EINVAL);
region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
region_end = region_start + region->size;
}
return region;
}
/**
* spi_nor_init_erase_cmd() - initialize an erase command
* @region: pointer to a structure that describes a SPI NOR erase region
* @erase: pointer to a structure that describes a SPI NOR erase type
*
* Return: the pointer to the allocated erase command, ERR_PTR(-errno)
* otherwise.
*/
static struct spi_nor_erase_command *
spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
const struct spi_nor_erase_type *erase)
{
struct spi_nor_erase_command *cmd;
cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&cmd->list);
cmd->opcode = erase->opcode;
cmd->count = 1;
if (region->offset & SNOR_OVERLAID_REGION)
cmd->size = region->size;
else
cmd->size = erase->size;
return cmd;
}
/**
* spi_nor_destroy_erase_cmd_list() - destroy erase command list
* @erase_list: list of erase commands
*/
static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
{
struct spi_nor_erase_command *cmd, *next;
list_for_each_entry_safe(cmd, next, erase_list, list) {
list_del(&cmd->list);
kfree(cmd);
}
}
/**
* spi_nor_init_erase_cmd_list() - initialize erase command list
* @nor: pointer to a 'struct spi_nor'
* @erase_list: list of erase commands to be executed once we validate that the
* erase can be performed
* @addr: offset in the serial flash memory
* @len: number of bytes to erase
*
* Builds the list of best fitted erase commands and verifies if the erase can
* be performed.
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
struct list_head *erase_list,
u64 addr, u32 len)
{
const struct spi_nor_erase_map *map = &nor->params->erase_map;
const struct spi_nor_erase_type *erase, *prev_erase = NULL;
struct spi_nor_erase_region *region;
struct spi_nor_erase_command *cmd = NULL;
u64 region_end;
int ret = -EINVAL;
region = spi_nor_find_erase_region(map, addr);
if (IS_ERR(region))
return PTR_ERR(region);
region_end = spi_nor_region_end(region);
while (len) {
erase = spi_nor_find_best_erase_type(map, region, addr, len);
if (!erase)
goto destroy_erase_cmd_list;
if (prev_erase != erase ||
erase->size != cmd->size ||
region->offset & SNOR_OVERLAID_REGION) {
cmd = spi_nor_init_erase_cmd(region, erase);
if (IS_ERR(cmd)) {
ret = PTR_ERR(cmd);
goto destroy_erase_cmd_list;
}
list_add_tail(&cmd->list, erase_list);
} else {
cmd->count++;
}
addr += cmd->size;
len -= cmd->size;
if (len && addr >= region_end) {
region = spi_nor_region_next(region);
if (!region)
goto destroy_erase_cmd_list;
region_end = spi_nor_region_end(region);
}
prev_erase = erase;
}
return 0;
destroy_erase_cmd_list:
spi_nor_destroy_erase_cmd_list(erase_list);
return ret;
}
/**
* spi_nor_erase_multi_sectors() - perform a non-uniform erase
* @nor: pointer to a 'struct spi_nor'
* @addr: offset in the serial flash memory
* @len: number of bytes to erase
*
* Build a list of best fitted erase commands and execute it once we validate
* that the erase can be performed.
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
LIST_HEAD(erase_list);
struct spi_nor_erase_command *cmd, *next;
int ret;
ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
if (ret)
return ret;
list_for_each_entry_safe(cmd, next, &erase_list, list) {
nor->erase_opcode = cmd->opcode;
while (cmd->count) {
dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
cmd->size, cmd->opcode, cmd->count);
ret = spi_nor_lock_device(nor);
if (ret)
goto destroy_erase_cmd_list;
ret = spi_nor_write_enable(nor);
if (ret) {
spi_nor_unlock_device(nor);
goto destroy_erase_cmd_list;
}
ret = spi_nor_erase_sector(nor, addr);
spi_nor_unlock_device(nor);
if (ret)
goto destroy_erase_cmd_list;
ret = spi_nor_wait_till_ready(nor);
if (ret)
goto destroy_erase_cmd_list;
addr += cmd->size;
cmd->count--;
}
list_del(&cmd->list);
kfree(cmd);
}
return 0;
destroy_erase_cmd_list:
spi_nor_destroy_erase_cmd_list(&erase_list);
return ret;
}
/*
* Erase an address range on the nor chip. The address range may span
* one or more erase sectors. Return an error if there is a problem erasing.
*/
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
u32 addr, len;
uint32_t rem;
int ret;
dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
(long long)instr->len);
if (spi_nor_has_uniform_erase(nor)) {
div_u64_rem(instr->len, mtd->erasesize, &rem);
if (rem)
return -EINVAL;
}
addr = instr->addr;
len = instr->len;
ret = spi_nor_prep_and_lock_pe(nor, instr->addr, instr->len);
if (ret)
return ret;
/* whole-chip erase? */
if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
unsigned long timeout;
ret = spi_nor_lock_device(nor);
if (ret)
goto erase_err;
ret = spi_nor_write_enable(nor);
if (ret) {
spi_nor_unlock_device(nor);
goto erase_err;
}
ret = spi_nor_erase_chip(nor);
spi_nor_unlock_device(nor);
if (ret)
goto erase_err;
/*
* Scale the timeout linearly with the size of the flash, with
* a minimum calibrated to an old 2MB flash. We could try to
* pull these from CFI/SFDP, but these values should be good
* enough for now.
*/
timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
(unsigned long)(mtd->size / SZ_2M));
ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
if (ret)
goto erase_err;
/* REVISIT in some cases we could speed up erasing large regions
* by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
* to use "small sector erase", but that's not always optimal.
*/
/* "sector"-at-a-time erase */
} else if (spi_nor_has_uniform_erase(nor)) {
while (len) {
ret = spi_nor_lock_device(nor);
if (ret)
goto erase_err;
ret = spi_nor_write_enable(nor);
if (ret) {
spi_nor_unlock_device(nor);
goto erase_err;
}
ret = spi_nor_erase_sector(nor, addr);
spi_nor_unlock_device(nor);
if (ret)
goto erase_err;
ret = spi_nor_wait_till_ready(nor);
if (ret)
goto erase_err;
addr += mtd->erasesize;
len -= mtd->erasesize;
}
/* erase multiple sectors */
} else {
ret = spi_nor_erase_multi_sectors(nor, addr, len);
if (ret)
goto erase_err;
}
ret = spi_nor_write_disable(nor);
erase_err:
spi_nor_unlock_and_unprep_pe(nor, instr->addr, instr->len);
return ret;
}
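/*
 * Illustrative sketch, not part of the upstream driver: the linear timeout
 * scaling used above for a full-chip erase. The function name and the 32 MiB
 * device size are made up for the example.
 */
static unsigned long __maybe_unused spi_nor_chip_erase_timeout_example(void)
{
	u64 flash_size = SZ_32M;

	/* 32 MiB / 2 MiB = 16, so sixteen times the 2 MiB reference timeout. */
	return max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
		   CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
		   (unsigned long)(flash_size / SZ_2M));
}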
/**
* spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
* Register 1.
* @nor: pointer to a 'struct spi_nor'
*
* Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
{
int ret;
ret = spi_nor_read_sr(nor, nor->bouncebuf);
if (ret)
return ret;
if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
return 0;
nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
}
/**
* spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
* Register 2.
* @nor: pointer to a 'struct spi_nor'.
*
* Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
{
int ret;
if (nor->flags & SNOR_F_NO_READ_CR)
return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
ret = spi_nor_read_cr(nor, nor->bouncebuf);
if (ret)
return ret;
if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
return 0;
nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}
/**
* spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
* @nor: pointer to a 'struct spi_nor'
*
* Set the Quad Enable (QE) bit in the Status Register 2.
*
* This is one of the procedures to set the QE bit described in the SFDP
* (JESD216 rev B) specification but no manufacturer using this procedure has
* been identified yet, hence the name of the function.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
{
u8 *sr2 = nor->bouncebuf;
int ret;
u8 sr2_written;
/* Check current Quad Enable bit value. */
ret = spi_nor_read_sr2(nor, sr2);
if (ret)
return ret;
if (*sr2 & SR2_QUAD_EN_BIT7)
return 0;
/* Update the Quad Enable bit. */
*sr2 |= SR2_QUAD_EN_BIT7;
ret = spi_nor_write_sr2(nor, sr2);
if (ret)
return ret;
sr2_written = *sr2;
/* Read back and check it. */
ret = spi_nor_read_sr2(nor, sr2);
if (ret)
return ret;
if (*sr2 != sr2_written) {
dev_dbg(nor->dev, "SR2: Read back test failed\n");
return -EIO;
}
return 0;
}
static const struct spi_nor_manufacturer *manufacturers[] = {
&spi_nor_atmel,
&spi_nor_catalyst,
&spi_nor_eon,
&spi_nor_esmt,
&spi_nor_everspin,
&spi_nor_fujitsu,
&spi_nor_gigadevice,
&spi_nor_intel,
&spi_nor_issi,
&spi_nor_macronix,
&spi_nor_micron,
&spi_nor_st,
&spi_nor_spansion,
&spi_nor_sst,
&spi_nor_winbond,
&spi_nor_xilinx,
&spi_nor_xmc,
};
static const struct flash_info spi_nor_generic_flash = {
.name = "spi-nor-generic",
.n_banks = 1,
/*
* JESD216 rev A doesn't specify the page size, therefore we need a
* sane default.
*/
.page_size = 256,
.parse_sfdp = true,
};
static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
const u8 *id)
{
const struct flash_info *part;
unsigned int i, j;
for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
for (j = 0; j < manufacturers[i]->nparts; j++) {
part = &manufacturers[i]->parts[j];
if (part->id_len &&
!memcmp(part->id, id, part->id_len)) {
nor->manufacturer = manufacturers[i];
return part;
}
}
}
return NULL;
}
static const struct flash_info *spi_nor_detect(struct spi_nor *nor)
{
const struct flash_info *info;
u8 *id = nor->bouncebuf;
int ret;
ret = spi_nor_read_id(nor, 0, 0, id, nor->reg_proto);
if (ret) {
dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
return ERR_PTR(ret);
}
/* Cache the complete flash ID. */
nor->id = devm_kmemdup(nor->dev, id, SPI_NOR_MAX_ID_LEN, GFP_KERNEL);
if (!nor->id)
return ERR_PTR(-ENOMEM);
info = spi_nor_match_id(nor, id);
/* Fallback to a generic flash described only by its SFDP data. */
if (!info) {
ret = spi_nor_check_sfdp_signature(nor);
if (!ret)
info = &spi_nor_generic_flash;
}
if (!info) {
dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
SPI_NOR_MAX_ID_LEN, id);
return ERR_PTR(-ENODEV);
}
return info;
}
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
loff_t from_lock = from;
size_t len_lock = len;
ssize_t ret;
dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
ret = spi_nor_prep_and_lock_rd(nor, from_lock, len_lock);
if (ret)
return ret;
while (len) {
loff_t addr = from;
addr = spi_nor_convert_addr(nor, addr);
ret = spi_nor_read_data(nor, addr, len, buf);
if (ret == 0) {
/* We shouldn't see 0-length reads */
ret = -EIO;
goto read_err;
}
if (ret < 0)
goto read_err;
WARN_ON(ret > len);
*retlen += ret;
buf += ret;
from += ret;
len -= ret;
}
ret = 0;
read_err:
spi_nor_unlock_and_unprep_rd(nor, from_lock, len_lock);
return ret;
}
/*
* Write an address range to the nor chip. Data must be written in
* FLASH_PAGESIZE chunks. The address range may be any size provided
* it is within the physical boundaries.
*/
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
size_t page_offset, page_remain, i;
ssize_t ret;
u32 page_size = nor->params->page_size;
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
ret = spi_nor_prep_and_lock_pe(nor, to, len);
if (ret)
return ret;
for (i = 0; i < len; ) {
ssize_t written;
loff_t addr = to + i;
/*
* If page_size is a power of two, the offset can be quickly
* calculated with an AND operation. On the other cases we
* need to do a modulus operation (more expensive).
*/
if (is_power_of_2(page_size)) {
page_offset = addr & (page_size - 1);
} else {
uint64_t aux = addr;
page_offset = do_div(aux, page_size);
}
/* the size of data remaining on the first page */
page_remain = min_t(size_t, page_size - page_offset, len - i);
addr = spi_nor_convert_addr(nor, addr);
ret = spi_nor_lock_device(nor);
if (ret)
goto write_err;
ret = spi_nor_write_enable(nor);
if (ret) {
spi_nor_unlock_device(nor);
goto write_err;
}
ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
spi_nor_unlock_device(nor);
if (ret < 0)
goto write_err;
written = ret;
ret = spi_nor_wait_till_ready(nor);
if (ret)
goto write_err;
*retlen += written;
i += written;
}
write_err:
spi_nor_unlock_and_unprep_pe(nor, to, len);
return ret;
}
static int spi_nor_check(struct spi_nor *nor)
{
if (!nor->dev ||
(!nor->spimem && !nor->controller_ops) ||
(!nor->spimem && nor->controller_ops &&
(!nor->controller_ops->read ||
!nor->controller_ops->write ||
!nor->controller_ops->read_reg ||
!nor->controller_ops->write_reg))) {
pr_err("spi-nor: please fill all the necessary fields!\n");
return -EINVAL;
}
if (nor->spimem && nor->controller_ops) {
dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
return -EINVAL;
}
return 0;
}
void
spi_nor_set_read_settings(struct spi_nor_read_command *read,
u8 num_mode_clocks,
u8 num_wait_states,
u8 opcode,
enum spi_nor_protocol proto)
{
read->num_mode_clocks = num_mode_clocks;
read->num_wait_states = num_wait_states;
read->opcode = opcode;
read->proto = proto;
}
void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
enum spi_nor_protocol proto)
{
pp->opcode = opcode;
pp->proto = proto;
}
static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
{
size_t i;
for (i = 0; i < size; i++)
if (table[i][0] == (int)hwcaps)
return table[i][1];
return -EINVAL;
}
int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
static const int hwcaps_read2cmd[][2] = {
{ SNOR_HWCAPS_READ, SNOR_CMD_READ },
{ SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
{ SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
{ SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
{ SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
{ SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
{ SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
{ SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
{ SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
{ SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
{ SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
{ SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
{ SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
{ SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
{ SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
{ SNOR_HWCAPS_READ_8_8_8_DTR, SNOR_CMD_READ_8_8_8_DTR },
};
return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
ARRAY_SIZE(hwcaps_read2cmd));
}
int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
static const int hwcaps_pp2cmd[][2] = {
{ SNOR_HWCAPS_PP, SNOR_CMD_PP },
{ SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
{ SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
{ SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
{ SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
{ SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
{ SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
{ SNOR_HWCAPS_PP_8_8_8_DTR, SNOR_CMD_PP_8_8_8_DTR },
};
return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
ARRAY_SIZE(hwcaps_pp2cmd));
}
/**
* spi_nor_spimem_check_op - check if the operation is supported
* by controller
* @nor: pointer to a 'struct spi_nor'
* @op: pointer to op template to be checked
*
* Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
*/
static int spi_nor_spimem_check_op(struct spi_nor *nor,
struct spi_mem_op *op)
{
/*
* First test with 4 address bytes. The opcode itself might
* be a 3-byte addressing opcode, but we don't care: the SPI
* controller implementation should not check the opcode, only
* the sequence.
*/
op->addr.nbytes = 4;
if (!spi_mem_supports_op(nor->spimem, op)) {
if (nor->params->size > SZ_16M)
return -EOPNOTSUPP;
/* If flash size <= 16MB, 3 address bytes are sufficient */
op->addr.nbytes = 3;
if (!spi_mem_supports_op(nor->spimem, op))
return -EOPNOTSUPP;
}
return 0;
}
/**
* spi_nor_spimem_check_readop - check if the read op is supported
* by controller
* @nor: pointer to a 'struct spi_nor'
* @read: pointer to op template to be checked
*
* Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
*/
static int spi_nor_spimem_check_readop(struct spi_nor *nor,
const struct spi_nor_read_command *read)
{
struct spi_mem_op op = SPI_NOR_READ_OP(read->opcode);
spi_nor_spimem_setup_op(nor, &op, read->proto);
/* convert the dummy cycles to the number of bytes */
op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
op.dummy.buswidth / 8;
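/*
* In DTR mode data is transferred on both clock edges, so the same
* number of dummy cycles corresponds to twice as many bytes.
*/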
if (spi_nor_protocol_is_dtr(nor->read_proto))
op.dummy.nbytes *= 2;
return spi_nor_spimem_check_op(nor, &op);
}
/**
* spi_nor_spimem_check_pp - check if the page program op is supported
* by controller
* @nor: pointer to a 'struct spi_nor'
* @pp: pointer to op template to be checked
*
* Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
*/
static int spi_nor_spimem_check_pp(struct spi_nor *nor,
const struct spi_nor_pp_command *pp)
{
struct spi_mem_op op = SPI_NOR_PP_OP(pp->opcode);
spi_nor_spimem_setup_op(nor, &op, pp->proto);
return spi_nor_spimem_check_op(nor, &op);
}
/**
* spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
* based on SPI controller capabilities
* @nor: pointer to a 'struct spi_nor'
* @hwcaps: pointer to resulting capabilities after adjusting
* according to controller and flash's capability
*/
static void
spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
{
struct spi_nor_flash_parameter *params = nor->params;
unsigned int cap;
/* X-X-X modes are not supported yet, mask them all. */
*hwcaps &= ~SNOR_HWCAPS_X_X_X;
/*
* If the reset line is broken, we do not want to enter a stateful
* mode.
*/
if (nor->flags & SNOR_F_BROKEN_RESET)
*hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);
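/*
* Check every remaining capability bit and drop it if the controller
* rejects the corresponding read or page-program operation.
*/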
for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
int rdidx, ppidx;
if (!(*hwcaps & BIT(cap)))
continue;
rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
if (rdidx >= 0 &&
spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
*hwcaps &= ~BIT(cap);
ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
if (ppidx < 0)
continue;
if (spi_nor_spimem_check_pp(nor,
&params->page_programs[ppidx]))
*hwcaps &= ~BIT(cap);
}
}
/**
* spi_nor_set_erase_type() - set a SPI NOR erase type
* @erase: pointer to a structure that describes a SPI NOR erase type
* @size: the size of the sector/block erased by the erase type
* @opcode: the SPI command op code to erase the sector/block
*/
void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
u8 opcode)
{
erase->size = size;
erase->opcode = opcode;
/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
erase->size_shift = ffs(erase->size) - 1;
erase->size_mask = (1 << erase->size_shift) - 1;
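/* Example: a 64 KiB erase type gives size_shift = 16 and size_mask = 0xffff. */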
}
/**
* spi_nor_mask_erase_type() - mask out a SPI NOR erase type
* @erase: pointer to a structure that describes a SPI NOR erase type
*/
void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase)
{
erase->size = 0;
}
/**
* spi_nor_init_uniform_erase_map() - Initialize uniform erase map
* @map: the erase map of the SPI NOR
* @erase_mask: bitmask encoding erase types that can erase the entire
* flash memory
* @flash_size: the spi nor flash memory size
*/
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
u8 erase_mask, u64 flash_size)
{
/* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
SNOR_LAST_REGION;
map->uniform_region.size = flash_size;
map->regions = &map->uniform_region;
map->uniform_erase_type = erase_mask;
}
int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
int ret;
if (nor->manufacturer && nor->manufacturer->fixups &&
nor->manufacturer->fixups->post_bfpt) {
ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
bfpt);
if (ret)
return ret;
}
if (nor->info->fixups && nor->info->fixups->post_bfpt)
return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt);
return 0;
}
static int spi_nor_select_read(struct spi_nor *nor,
u32 shared_hwcaps)
{
int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
const struct spi_nor_read_command *read;
if (best_match < 0)
return -EINVAL;
cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
if (cmd < 0)
return -EINVAL;
read = &nor->params->reads[cmd];
nor->read_opcode = read->opcode;
nor->read_proto = read->proto;
/*
* In the SPI NOR framework, we don't need to make the difference
* between mode clock cycles and wait state clock cycles.
* Indeed, the value of the mode clock cycles is used by a QSPI
* flash memory to know whether it should enter or leave its 0-4-4
* (Continuous Read / XIP) mode.
* eXecution In Place is out of the scope of the mtd sub-system.
* Hence we choose to merge both mode and wait state clock cycles
* into the so called dummy clock cycles.
*/
nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
return 0;
}
static int spi_nor_select_pp(struct spi_nor *nor,
u32 shared_hwcaps)
{
int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
const struct spi_nor_pp_command *pp;
if (best_match < 0)
return -EINVAL;
cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
if (cmd < 0)
return -EINVAL;
pp = &nor->params->page_programs[cmd];
nor->program_opcode = pp->opcode;
nor->write_proto = pp->proto;
return 0;
}
/**
* spi_nor_select_uniform_erase() - select optimum uniform erase type
* @map: the erase map of the SPI NOR
* @wanted_size: the erase type size to search for. Contains the value of
* info->sector_size, the "small sector" size if
* CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined, or 0 if there
* is no information about the sector size. The latter is the
* case when the flash parameters are parsed solely from SFDP;
* the largest supported erase type is then selected.
*
* Once the optimum uniform sector erase command is found, disable all the
* others.
*
* Return: pointer to erase type on success, NULL otherwise.
*/
static const struct spi_nor_erase_type *
spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
const u32 wanted_size)
{
const struct spi_nor_erase_type *tested_erase, *erase = NULL;
int i;
u8 uniform_erase_type = map->uniform_erase_type;
for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
if (!(uniform_erase_type & BIT(i)))
continue;
tested_erase = &map->erase_type[i];
/* Skip masked erase types. */
if (!tested_erase->size)
continue;
/*
* If the current erase size matches the wanted size, stop here:
* we have found the right uniform Sector Erase command.
*/
if (tested_erase->size == wanted_size) {
erase = tested_erase;
break;
}
/*
* Otherwise, the current erase size is still a valid candidate.
* Select the biggest valid candidate.
*/
if (!erase && tested_erase->size)
erase = tested_erase;
/* keep iterating to find the wanted_size */
}
if (!erase)
return NULL;
/* Disable all other Sector Erase commands. */
map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
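/* 'erase - map->erase_type' is the index of the selected erase type in the table. */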
map->uniform_erase_type |= BIT(erase - map->erase_type);
return erase;
}
static int spi_nor_select_erase(struct spi_nor *nor)
{
struct spi_nor_erase_map *map = &nor->params->erase_map;
const struct spi_nor_erase_type *erase = NULL;
struct mtd_info *mtd = &nor->mtd;
u32 wanted_size = nor->info->sector_size;
int i;
/*
* The previous implementation handling Sector Erase commands assumed
* that the SPI flash memory has a uniform layout and then used only one
* of the supported erase sizes for all Sector Erase commands.
* So, to be backward compatible, the new implementation also tries to
* manage the SPI flash memory as uniform with a single erase sector
* size, when possible.
*/
#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
/* prefer "small sector" erase if possible */
wanted_size = 4096u;
#endif
if (spi_nor_has_uniform_erase(nor)) {
erase = spi_nor_select_uniform_erase(map, wanted_size);
if (!erase)
return -EINVAL;
nor->erase_opcode = erase->opcode;
mtd->erasesize = erase->size;
return 0;
}
/*
* For non-uniform SPI flash memory, set mtd->erasesize to the
* maximum erase sector size. No need to set nor->erase_opcode.
*/
for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
if (map->erase_type[i].size) {
erase = &map->erase_type[i];
break;
}
}
if (!erase)
return -EINVAL;
mtd->erasesize = erase->size;
return 0;
}
static int spi_nor_default_setup(struct spi_nor *nor,
const struct spi_nor_hwcaps *hwcaps)
{
struct spi_nor_flash_parameter *params = nor->params;
u32 ignored_mask, shared_mask;
int err;
/*
* Keep only the hardware capabilities supported by both the SPI
* controller and the SPI flash memory.
*/
shared_mask = hwcaps->mask & params->hwcaps.mask;
if (nor->spimem) {
/*
* When called from spi_nor_probe(), all caps are set and we
* need to discard some of them based on what the SPI
* controller actually supports (using spi_mem_supports_op()).
*/
spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
} else {
/*
* SPI n-n-n protocols are not supported when the SPI
* controller directly implements the spi_nor interface.
* Yet another reason to switch to spi-mem.
*/
ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR;
if (shared_mask & ignored_mask) {
dev_dbg(nor->dev,
"SPI n-n-n protocols are not supported.\n");
shared_mask &= ~ignored_mask;
}
}
/* Select the (Fast) Read command. */
err = spi_nor_select_read(nor, shared_mask);
if (err) {
dev_dbg(nor->dev,
"can't select read settings supported by both the SPI controller and memory.\n");
return err;
}
/* Select the Page Program command. */
err = spi_nor_select_pp(nor, shared_mask);
if (err) {
dev_dbg(nor->dev,
"can't select write settings supported by both the SPI controller and memory.\n");
return err;
}
/* Select the Sector Erase command. */
err = spi_nor_select_erase(nor);
if (err) {
dev_dbg(nor->dev,
"can't select erase settings supported by both the SPI controller and memory.\n");
return err;
}
return 0;
}
static int spi_nor_set_addr_nbytes(struct spi_nor *nor)
{
if (nor->params->addr_nbytes) {
nor->addr_nbytes = nor->params->addr_nbytes;
} else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
/*
* In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
* in this protocol an odd addr_nbytes cannot be used because
* then the address phase would only span a cycle and a half.
* Half a cycle would be left over. We would then have to start
* the dummy phase in the middle of a cycle, and likewise the data
* phase, and the transaction would end with half a cycle left
* over.
*
* Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to
* avoid this situation.
*/
nor->addr_nbytes = 4;
} else if (nor->info->addr_nbytes) {
nor->addr_nbytes = nor->info->addr_nbytes;
} else {
nor->addr_nbytes = 3;
}
if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) {
/* enable 4-byte addressing if the device exceeds 16MiB */
nor->addr_nbytes = 4;
}
if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) {
dev_dbg(nor->dev, "The number of address bytes is too large: %u\n",
nor->addr_nbytes);
return -EINVAL;
}
/* Set 4byte opcodes when possible. */
if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES &&
!(nor->flags & SNOR_F_HAS_4BAIT))
spi_nor_set_4byte_opcodes(nor);
return 0;
}
static int spi_nor_setup(struct spi_nor *nor,
const struct spi_nor_hwcaps *hwcaps)
{
int ret;
if (nor->params->setup)
ret = nor->params->setup(nor, hwcaps);
else
ret = spi_nor_default_setup(nor, hwcaps);
if (ret)
return ret;
return spi_nor_set_addr_nbytes(nor);
}
/**
* spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
* settings based on MFR register and ->default_init() hook.
* @nor: pointer to a 'struct spi_nor'.
*/
static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
{
if (nor->manufacturer && nor->manufacturer->fixups &&
nor->manufacturer->fixups->default_init)
nor->manufacturer->fixups->default_init(nor);
if (nor->info->fixups && nor->info->fixups->default_init)
nor->info->fixups->default_init(nor);
}
/**
* spi_nor_no_sfdp_init_params() - Initialize the flash's parameters and
* settings based on nor->info->no_sfdp_flags. This method should be called only
* by flashes that do not define SFDP tables. If the flash supports SFDP but the
* information is wrong and the settings from this function cannot be retrieved
* by parsing SFDP, one should instead use the fixup hooks and update the wrong
* bits.
* @nor: pointer to a 'struct spi_nor'.
*/
static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
struct spi_nor_erase_map *map = &params->erase_map;
const u8 no_sfdp_flags = nor->info->no_sfdp_flags;
u8 i, erase_mask;
if (no_sfdp_flags & SPI_NOR_DUAL_READ) {
params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
0, 8, SPINOR_OP_READ_1_1_2,
SNOR_PROTO_1_1_2);
}
if (no_sfdp_flags & SPI_NOR_QUAD_READ) {
params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
0, 8, SPINOR_OP_READ_1_1_4,
SNOR_PROTO_1_1_4);
}
if (no_sfdp_flags & SPI_NOR_OCTAL_READ) {
params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
0, 8, SPINOR_OP_READ_1_1_8,
SNOR_PROTO_1_1_8);
}
if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_READ) {
params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
0, 20, SPINOR_OP_READ_FAST,
SNOR_PROTO_8_8_8_DTR);
}
if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_PP) {
params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
/*
* Since xSPI Page Program opcode is backward compatible with
* Legacy SPI, use Legacy SPI opcode there as well.
*/
spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR);
}
/*
* Sector Erase settings. Sort Erase Types in ascending order, with the
* smallest erase size starting at BIT(0).
*/
erase_mask = 0;
i = 0;
if (no_sfdp_flags & SECT_4K) {
erase_mask |= BIT(i);
spi_nor_set_erase_type(&map->erase_type[i], 4096u,
SPINOR_OP_BE_4K);
i++;
}
erase_mask |= BIT(i);
spi_nor_set_erase_type(&map->erase_type[i], nor->info->sector_size,
SPINOR_OP_SE);
spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}
/**
* spi_nor_init_flags() - Initialize NOR flags for settings that are not defined
* in the JESD216 SFDP standard, and thus cannot be retrieved when parsing SFDP.
* @nor: pointer to a 'struct spi_nor'
*/
static void spi_nor_init_flags(struct spi_nor *nor)
{
struct device_node *np = spi_nor_get_flash_node(nor);
const u16 flags = nor->info->flags;
if (of_property_read_bool(np, "broken-flash-reset"))
nor->flags |= SNOR_F_BROKEN_RESET;
if (of_property_read_bool(np, "no-wp"))
nor->flags |= SNOR_F_NO_WP;
if (flags & SPI_NOR_SWP_IS_VOLATILE)
nor->flags |= SNOR_F_SWP_IS_VOLATILE;
if (flags & SPI_NOR_HAS_LOCK)
nor->flags |= SNOR_F_HAS_LOCK;
if (flags & SPI_NOR_HAS_TB) {
nor->flags |= SNOR_F_HAS_SR_TB;
if (flags & SPI_NOR_TB_SR_BIT6)
nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
}
if (flags & SPI_NOR_4BIT_BP) {
nor->flags |= SNOR_F_HAS_4BIT_BP;
if (flags & SPI_NOR_BP3_SR_BIT6)
nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
}
if (flags & NO_CHIP_ERASE)
nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
if (flags & SPI_NOR_RWW && nor->info->n_banks > 1 &&
!nor->controller_ops)
nor->flags |= SNOR_F_RWW;
}
/**
* spi_nor_init_fixup_flags() - Initialize NOR flags for settings that cannot
* be discovered by SFDP for this particular flash because the SFDP table that
* indicates this support is not defined in the flash. In case the table for
* this support is defined but has wrong values, one should instead use a
* post_sfdp() hook to set the SNOR_F equivalent flag.
* @nor: pointer to a 'struct spi_nor'
*/
static void spi_nor_init_fixup_flags(struct spi_nor *nor)
{
const u8 fixup_flags = nor->info->fixup_flags;
if (fixup_flags & SPI_NOR_4B_OPCODES)
nor->flags |= SNOR_F_4B_OPCODES;
if (fixup_flags & SPI_NOR_IO_MODE_EN_VOLATILE)
nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
}
/**
* spi_nor_late_init_params() - Late initialization of default flash parameters.
* @nor: pointer to a 'struct spi_nor'
*
* Used to initialize flash parameters that are not declared in the JESD216
* SFDP standard, or where SFDP tables are not defined at all.
* Will replace the spi_nor_manufacturer_init_params() method.
*/
static int spi_nor_late_init_params(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
int ret;
if (nor->manufacturer && nor->manufacturer->fixups &&
nor->manufacturer->fixups->late_init) {
ret = nor->manufacturer->fixups->late_init(nor);
if (ret)
return ret;
}
if (nor->info->fixups && nor->info->fixups->late_init) {
ret = nor->info->fixups->late_init(nor);
if (ret)
return ret;
}
/* Default method kept for backward compatibility. */
if (!params->set_4byte_addr_mode)
params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_brwr;
spi_nor_init_flags(nor);
spi_nor_init_fixup_flags(nor);
/*
* NOR protection support. When locking_ops are not provided, we pick
* the default ones.
*/
if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
spi_nor_init_default_locking_ops(nor);
if (nor->info->n_banks > 1)
params->bank_size = div64_u64(params->size, nor->info->n_banks);
return 0;
}
/**
* spi_nor_sfdp_init_params_deprecated() - Deprecated way of initializing flash
* parameters and settings based on JESD216 SFDP standard.
* @nor: pointer to a 'struct spi_nor'.
*
* The method has a roll-back mechanism: in case the SFDP parsing fails, the
* legacy flash parameters and settings will be restored.
*/
static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor)
{
struct spi_nor_flash_parameter sfdp_params;
memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
if (spi_nor_parse_sfdp(nor)) {
memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
nor->flags &= ~SNOR_F_4B_OPCODES;
}
}
/**
* spi_nor_init_params_deprecated() - Deprecated way of initializing flash
* parameters and settings.
* @nor: pointer to a 'struct spi_nor'.
*
* The method assumes that the flash doesn't support SFDP, so it initializes the
* flash parameters in spi_nor_no_sfdp_init_params(), which can later be
* overwritten when parsing SFDP, if supported.
*/
static void spi_nor_init_params_deprecated(struct spi_nor *nor)
{
spi_nor_no_sfdp_init_params(nor);
spi_nor_manufacturer_init_params(nor);
if (nor->info->no_sfdp_flags & (SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ |
SPI_NOR_OCTAL_READ |
SPI_NOR_OCTAL_DTR_READ))
spi_nor_sfdp_init_params_deprecated(nor);
}
/**
* spi_nor_init_default_params() - Default initialization of flash parameters
* and settings. Done for all flashes, regardless of whether they define SFDP tables
* or not.
* @nor: pointer to a 'struct spi_nor'.
*/
static void spi_nor_init_default_params(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
const struct flash_info *info = nor->info;
struct device_node *np = spi_nor_get_flash_node(nor);
params->quad_enable = spi_nor_sr2_bit1_quad_enable;
params->otp.org = &info->otp_org;
/* Default to 16-bit Write Status (01h) Command */
nor->flags |= SNOR_F_HAS_16BIT_SR;
/* Set SPI NOR sizes. */
params->writesize = 1;
params->size = (u64)info->sector_size * info->n_sectors;
params->bank_size = params->size;
params->page_size = info->page_size;
if (!(info->flags & SPI_NOR_NO_FR)) {
/* Default to Fast Read for DT and non-DT platform devices. */
params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
/* Mask out Fast Read if not requested at DT instantiation. */
if (np && !of_property_read_bool(np, "m25p,fast-read"))
params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
}
/* (Fast) Read settings. */
params->hwcaps.mask |= SNOR_HWCAPS_READ;
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
0, 0, SPINOR_OP_READ,
SNOR_PROTO_1_1_1);
if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
0, 8, SPINOR_OP_READ_FAST,
SNOR_PROTO_1_1_1);
/* Page Program settings. */
params->hwcaps.mask |= SNOR_HWCAPS_PP;
spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
SPINOR_OP_PP, SNOR_PROTO_1_1_1);
if (info->flags & SPI_NOR_QUAD_PP) {
params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4);
}
}
/**
* spi_nor_init_params() - Initialize the flash's parameters and settings.
* @nor: pointer to a 'struct spi_nor'.
*
* The flash parameters and settings are initialized based on a sequence of
* calls that are ordered by priority:
*
* 1/ Default flash parameters initialization. The initializations are done
* based on nor->info data:
* spi_nor_init_default_params()
*
* which can be overwritten by:
* 2/ Manufacturer flash parameters initialization. The initializations are
* done based on MFR register, or when the decisions cannot be made solely
* based on MFR, by using specific flash_info tweaks, ->default_init():
* spi_nor_manufacturer_init_params()
*
* which can be overwritten by:
* 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
* should be more accurate than the above.
* spi_nor_parse_sfdp() or spi_nor_no_sfdp_init_params()
*
* Please note that there is a ->post_bfpt() fixup hook that can overwrite
* the flash parameters and settings immediately after parsing the Basic
* Flash Parameter Table.
* spi_nor_post_sfdp_fixups() is called after the SFDP tables are parsed.
* It is used to tweak various flash parameters when the information provided
* by the SFDP tables is wrong.
*
* which can be overwritten by:
* 4/ Late flash parameters initialization, used to initialize flash
* parameters that are not declared in the JESD216 SFDP standard, or where SFDP
* tables are not defined at all.
* spi_nor_late_init_params()
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_init_params(struct spi_nor *nor)
{
int ret;
nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
if (!nor->params)
return -ENOMEM;
spi_nor_init_default_params(nor);
if (nor->info->parse_sfdp) {
ret = spi_nor_parse_sfdp(nor);
if (ret) {
dev_err(nor->dev, "BFPT parsing failed. Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n");
return ret;
}
} else if (nor->info->no_sfdp_flags & SPI_NOR_SKIP_SFDP) {
spi_nor_no_sfdp_init_params(nor);
} else {
spi_nor_init_params_deprecated(nor);
}
return spi_nor_late_init_params(nor);
}
/**
* spi_nor_set_octal_dtr() - enable or disable Octal DTR I/O.
* @nor: pointer to a 'struct spi_nor'
* @enable: whether to enable or disable Octal DTR
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
{
int ret;
if (!nor->params->set_octal_dtr)
return 0;
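/* Only switch the I/O mode when both read and write use 8D-8D-8D. */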
if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
nor->write_proto == SNOR_PROTO_8_8_8_DTR))
return 0;
if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
return 0;
ret = nor->params->set_octal_dtr(nor, enable);
if (ret)
return ret;
if (enable)
nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
else
nor->reg_proto = SNOR_PROTO_1_1_1;
return 0;
}
/**
* spi_nor_quad_enable() - enable Quad I/O if needed.
* @nor: pointer to a 'struct spi_nor'
*
* Return: 0 on success, -errno otherwise.
*/
static int spi_nor_quad_enable(struct spi_nor *nor)
{
if (!nor->params->quad_enable)
return 0;
if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
spi_nor_get_protocol_width(nor->write_proto) == 4))
return 0;
return nor->params->quad_enable(nor);
}
/**
* spi_nor_set_4byte_addr_mode() - Set address mode.
* @nor: pointer to a 'struct spi_nor'.
* @enable: enable/disable 4 byte address mode.
*
* Return: 0 on success, -errno otherwise.
*/
int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
struct spi_nor_flash_parameter *params = nor->params;
int ret;
ret = params->set_4byte_addr_mode(nor, enable);
if (ret && ret != -ENOTSUPP)
return ret;
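/* Record the address width that now matches the flash's addressing mode. */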
if (enable) {
params->addr_nbytes = 4;
params->addr_mode_nbytes = 4;
} else {
params->addr_nbytes = 3;
params->addr_mode_nbytes = 3;
}
return 0;
}
static int spi_nor_init(struct spi_nor *nor)
{
int err;
err = spi_nor_set_octal_dtr(nor, true);
if (err) {
dev_dbg(nor->dev, "octal mode not supported\n");
return err;
}
err = spi_nor_quad_enable(nor);
if (err) {
dev_dbg(nor->dev, "quad mode not supported\n");
return err;
}
/*
* Some SPI NOR flashes are write protected by default after a power-on
* reset cycle, in order to avoid inadvertent writes during power-up.
* Backward compatibility requires unlocking the entire flash memory
* array at power-up by default. Depending on the kernel configuration,
* (1) do nothing, (2) always unlock the entire flash array or (3)
* unlock the entire flash array only when the software write
* protection bits are volatile. The latter is indicated by
* SNOR_F_SWP_IS_VOLATILE.
*/
if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) ||
(IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) &&
nor->flags & SNOR_F_SWP_IS_VOLATILE))
spi_nor_try_unlock_all(nor);
if (nor->addr_nbytes == 4 &&
nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
!(nor->flags & SNOR_F_4B_OPCODES)) {
/*
* If the RESET# pin isn't hooked up properly, or the system
* otherwise doesn't perform a reset command in the boot
* sequence, it's impossible to 100% protect against unexpected
* reboots (e.g., crashes). Warn the user (or hopefully, system
* designer) that this is bad.
*/
WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
"enabling reset hack; may not recover from unexpected reboots\n");
err = spi_nor_set_4byte_addr_mode(nor, true);
if (err)
return err;
}
return 0;
}
/**
* spi_nor_soft_reset() - Perform a software reset
* @nor: pointer to 'struct spi_nor'
*
* Performs a "Soft Reset and Enter Default Protocol Mode" sequence which resets
* the device to its power-on-reset state. This is useful when the software has
* made some changes to device (volatile) registers and needs to reset it before
* shutting down, for example.
*
* Not every flash supports this sequence. The same set of opcodes might be used
* for some other operation on a flash that does not support this. Support for
* this sequence can be discovered via SFDP in the BFPT table.
*/
static void spi_nor_soft_reset(struct spi_nor *nor)
{
struct spi_mem_op op;
int ret;
op = (struct spi_mem_op)SPINOR_SRSTEN_OP;
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
if (ret) {
dev_warn(nor->dev, "Software reset failed: %d\n", ret);
return;
}
op = (struct spi_mem_op)SPINOR_SRST_OP;
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
if (ret) {
dev_warn(nor->dev, "Software reset failed: %d\n", ret);
return;
}
/*
* Software Reset is not instant, and the delay varies from flash to
* flash. Looking at a few flashes, most range somewhere below 100
* microseconds. So, sleep for a range of 200-400 us.
*/
usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
}
/* mtd suspend handler */
static int spi_nor_suspend(struct mtd_info *mtd)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
/* Disable octal DTR mode if we enabled it. */
ret = spi_nor_set_octal_dtr(nor, false);
if (ret)
dev_err(nor->dev, "suspend() failed\n");
return ret;
}
/* mtd resume handler */
static void spi_nor_resume(struct mtd_info *mtd)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
struct device *dev = nor->dev;
int ret;
/* re-initialize the nor chip */
ret = spi_nor_init(nor);
if (ret)
dev_err(dev, "resume() failed\n");
}
static int spi_nor_get_device(struct mtd_info *mtd)
{
struct mtd_info *master = mtd_get_master(mtd);
struct spi_nor *nor = mtd_to_spi_nor(master);
struct device *dev;
if (nor->spimem)
dev = nor->spimem->spi->controller->dev.parent;
else
dev = nor->dev;
if (!try_module_get(dev->driver->owner))
return -ENODEV;
return 0;
}
static void spi_nor_put_device(struct mtd_info *mtd)
{
struct mtd_info *master = mtd_get_master(mtd);
struct spi_nor *nor = mtd_to_spi_nor(master);
struct device *dev;
if (nor->spimem)
dev = nor->spimem->spi->controller->dev.parent;
else
dev = nor->dev;
module_put(dev->driver->owner);
}
static void spi_nor_restore(struct spi_nor *nor)
{
int ret;
/* restore the addressing mode */
if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
nor->flags & SNOR_F_BROKEN_RESET) {
ret = spi_nor_set_4byte_addr_mode(nor, false);
if (ret)
/*
* Do not stop the execution in the hope that the flash
* will default to the 3-byte address mode after the
* software reset.
*/
dev_err(nor->dev, "Failed to exit 4-byte address mode, err = %d\n", ret);
}
if (nor->flags & SNOR_F_SOFT_RESET)
spi_nor_soft_reset(nor);
}
static const struct flash_info *spi_nor_match_name(struct spi_nor *nor,
const char *name)
{
unsigned int i, j;
for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
for (j = 0; j < manufacturers[i]->nparts; j++) {
if (!strcmp(name, manufacturers[i]->parts[j].name)) {
nor->manufacturer = manufacturers[i];
return &manufacturers[i]->parts[j];
}
}
}
return NULL;
}
static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
const char *name)
{
const struct flash_info *info = NULL;
if (name)
info = spi_nor_match_name(nor, name);
/* Try to auto-detect if chip name wasn't specified or not found */
if (!info)
return spi_nor_detect(nor);
/*
* If the caller has specified the name of a flash model that can normally be
* detected using JEDEC, let's verify it.
*/
if (name && info->id_len) {
const struct flash_info *jinfo;
jinfo = spi_nor_detect(nor);
if (IS_ERR(jinfo)) {
return jinfo;
} else if (jinfo != info) {
/*
* JEDEC knows better, so overwrite platform ID. We
* can't trust partitions any longer, but we'll let
* mtd apply them anyway, since some partitions may be
* marked read-only, and we don't want to lose that
* information, even if it's not 100% accurate.
*/
dev_warn(nor->dev, "found %s, expected %s\n",
jinfo->name, info->name);
info = jinfo;
}
}
return info;
}
static void spi_nor_set_mtd_info(struct spi_nor *nor)
{
struct mtd_info *mtd = &nor->mtd;
struct device *dev = nor->dev;
spi_nor_set_mtd_locking_ops(nor);
spi_nor_set_mtd_otp_ops(nor);
mtd->dev.parent = dev;
if (!mtd->name)
mtd->name = dev_name(dev);
mtd->type = MTD_NORFLASH;
mtd->flags = MTD_CAP_NORFLASH;
/* Unset MTD_BIT_WRITEABLE to enable the JFFS2 write buffer for ECC'd NOR. */
if (nor->flags & SNOR_F_ECC)
mtd->flags &= ~MTD_BIT_WRITEABLE;
if (nor->info->flags & SPI_NOR_NO_ERASE)
mtd->flags |= MTD_NO_ERASE;
else
mtd->_erase = spi_nor_erase;
mtd->writesize = nor->params->writesize;
mtd->writebufsize = nor->params->page_size;
mtd->size = nor->params->size;
mtd->_read = spi_nor_read;
/* Might be already set by some SST flashes. */
if (!mtd->_write)
mtd->_write = spi_nor_write;
mtd->_suspend = spi_nor_suspend;
mtd->_resume = spi_nor_resume;
mtd->_get_device = spi_nor_get_device;
mtd->_put_device = spi_nor_put_device;
}
static int spi_nor_hw_reset(struct spi_nor *nor)
{
struct gpio_desc *reset;
reset = devm_gpiod_get_optional(nor->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR_OR_NULL(reset))
return PTR_ERR_OR_ZERO(reset);
/*
* Delay values chosen experimentally, based on various flash device
* vendors' datasheets.
*/
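/*
* Sequence: let the line settle, assert RESET# for 100-150 us, release
* it, then give the flash about 1 ms to complete its reset.
*/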
usleep_range(1, 5);
gpiod_set_value_cansleep(reset, 1);
usleep_range(100, 150);
gpiod_set_value_cansleep(reset, 0);
usleep_range(1000, 1200);
return 0;
}
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
const struct flash_info *info;
struct device *dev = nor->dev;
struct mtd_info *mtd = &nor->mtd;
int ret;
int i;
ret = spi_nor_check(nor);
if (ret)
return ret;
/* Reset SPI protocol for all commands. */
nor->reg_proto = SNOR_PROTO_1_1_1;
nor->read_proto = SNOR_PROTO_1_1_1;
nor->write_proto = SNOR_PROTO_1_1_1;
/*
* We need the bounce buffer early to read/write registers when going
* through the spi-mem layer (buffers have to be DMA-able).
* For spi-mem drivers, we'll reallocate a new buffer if
* nor->params->page_size turns out to be greater than PAGE_SIZE (which
* is unlikely, since NOR pages are usually less than 1 KiB) after
* spi_nor_scan() returns.
*/
nor->bouncebuf_size = PAGE_SIZE;
nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
GFP_KERNEL);
if (!nor->bouncebuf)
return -ENOMEM;
ret = spi_nor_hw_reset(nor);
if (ret)
return ret;
info = spi_nor_get_flash_info(nor, name);
if (IS_ERR(info))
return PTR_ERR(info);
nor->info = info;
mutex_init(&nor->lock);
/* Init flash parameters based on flash_info struct and SFDP */
ret = spi_nor_init_params(nor);
if (ret)
return ret;
if (spi_nor_use_parallel_locking(nor))
init_waitqueue_head(&nor->rww.wait);
/*
* Configure the SPI memory:
* - select op codes for (Fast) Read, Page Program and Sector Erase.
* - set the number of dummy cycles (mode cycles + wait states).
* - set the SPI protocols for register and memory accesses.
* - set the number of address bytes.
*/
ret = spi_nor_setup(nor, hwcaps);
if (ret)
return ret;
/* Send all the required SPI flash commands to initialize device */
ret = spi_nor_init(nor);
if (ret)
return ret;
/* No mtd_info fields should be used up to this point. */
spi_nor_set_mtd_info(nor);
dev_info(dev, "%s (%lld Kbytes)\n", info->name,
(long long)mtd->size >> 10);
dev_dbg(dev,
"mtd .name = %s, .size = 0x%llx (%lldMiB), "
".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
if (mtd->numeraseregions)
for (i = 0; i < mtd->numeraseregions; i++)
dev_dbg(dev,
"mtd.eraseregions[%d] = { .offset = 0x%llx, "
".erasesize = 0x%.8x (%uKiB), "
".numblocks = %d }\n",
i, (long long)mtd->eraseregions[i].offset,
mtd->eraseregions[i].erasesize,
mtd->eraseregions[i].erasesize / 1024,
mtd->eraseregions[i].numblocks);
return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
static int spi_nor_create_read_dirmap(struct spi_nor *nor)
{
struct spi_mem_dirmap_info info = {
.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
SPI_MEM_OP_DATA_IN(0, NULL, 0)),
.offset = 0,
.length = nor->params->size,
};
struct spi_mem_op *op = &info.op_tmpl;
spi_nor_spimem_setup_op(nor, op, nor->read_proto);
/* convert the dummy cycles to the number of bytes */
op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
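/* In DTR mode, each dummy cycle transfers twice as many bytes. */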
if (spi_nor_protocol_is_dtr(nor->read_proto))
op->dummy.nbytes *= 2;
/*
* Since spi_nor_spimem_setup_op() only sets buswidth when the number
* of data bytes is non-zero, the data buswidth won't be set here. So,
* do it explicitly.
*/
op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
&info);
return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
}
static int spi_nor_create_write_dirmap(struct spi_nor *nor)
{
struct spi_mem_dirmap_info info = {
.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
.offset = 0,
.length = nor->params->size,
};
struct spi_mem_op *op = &info.op_tmpl;
if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
op->addr.nbytes = 0;
spi_nor_spimem_setup_op(nor, op, nor->write_proto);
/*
* Since spi_nor_spimem_setup_op() only sets buswidth when the number
* of data bytes is non-zero, the data buswidth won't be set here. So,
* do it explicitly.
*/
op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
&info);
return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
}
static int spi_nor_probe(struct spi_mem *spimem)
{
struct spi_device *spi = spimem->spi;
struct flash_platform_data *data = dev_get_platdata(&spi->dev);
struct spi_nor *nor;
/*
* Enable all caps by default. The core will mask them after
* checking what's really supported using spi_mem_supports_op().
*/
const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
char *flash_name;
int ret;
nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
if (!nor)
return -ENOMEM;
nor->spimem = spimem;
nor->dev = &spi->dev;
spi_nor_set_flash_node(nor, spi->dev.of_node);
spi_mem_set_drvdata(spimem, nor);
if (data && data->name)
nor->mtd.name = data->name;
if (!nor->mtd.name)
nor->mtd.name = spi_mem_get_name(spimem);
/*
* For some (historical?) reason many platforms provide two different
* names in flash_platform_data: "name" and "type". Quite often name is
* set to "m25p80" and then "type" provides a real chip name.
* If that's the case, respect "type" and ignore a "name".
*/
if (data && data->type)
flash_name = data->type;
else if (!strcmp(spi->modalias, "spi-nor"))
flash_name = NULL; /* auto-detect */
else
flash_name = spi->modalias;
ret = spi_nor_scan(nor, flash_name, &hwcaps);
if (ret)
return ret;
spi_nor_debugfs_register(nor);
/*
* None of the existing parts have > 512B pages, but let's play safe
* and add this logic so that if anyone ever adds support for such
* a NOR we don't end up with buffer overflows.
*/
if (nor->params->page_size > PAGE_SIZE) {
nor->bouncebuf_size = nor->params->page_size;
devm_kfree(nor->dev, nor->bouncebuf);
nor->bouncebuf = devm_kmalloc(nor->dev,
nor->bouncebuf_size,
GFP_KERNEL);
if (!nor->bouncebuf)
return -ENOMEM;
}
ret = spi_nor_create_read_dirmap(nor);
if (ret)
return ret;
ret = spi_nor_create_write_dirmap(nor);
if (ret)
return ret;
return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
data ? data->nr_parts : 0);
}
static int spi_nor_remove(struct spi_mem *spimem)
{
struct spi_nor *nor = spi_mem_get_drvdata(spimem);
spi_nor_restore(nor);
/* Clean up MTD stuff. */
return mtd_device_unregister(&nor->mtd);
}
static void spi_nor_shutdown(struct spi_mem *spimem)
{
struct spi_nor *nor = spi_mem_get_drvdata(spimem);
spi_nor_restore(nor);
}
/*
* Do NOT add to this array without reading the following:
*
* Historically, many flash devices are bound to this driver by their name. But
* since most of these flashes are compatible to some extent, and their
* differences can often be distinguished by the JEDEC read-ID command, we
* encourage new users to add support to the spi-nor library, and simply bind
* against a generic string here (e.g., "jedec,spi-nor").
*
* Many flash names are kept here in this list to keep them available
* as module aliases for existing platforms.
*/
static const struct spi_device_id spi_nor_dev_ids[] = {
/*
* Allow non-DT platform devices to bind to the "spi-nor" modalias, and
* hack around the fact that the SPI core does not provide uevent
* matching for .of_match_table
*/
{"spi-nor"},
/*
* Entries not used in DTs that should be safe to drop after replacing
* them with "spi-nor" in platform data.
*/
{"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"},
/*
* Entries that were used in DTs without "jedec,spi-nor" fallback and
* should be kept for backward compatibility.
*/
{"at25df321a"}, {"at25df641"}, {"at26df081a"},
{"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"},
{"mx25l25635e"},{"mx66l51235l"},
{"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"},
{"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"},
{"s25fl064k"},
{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
{"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"},
{"m25p64"}, {"m25p128"},
{"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
{"w25q80bl"}, {"w25q128"}, {"w25q256"},
/* Flashes that can't be detected using JEDEC */
{"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"},
{"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
{"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
/* Everspin MRAMs (non-JEDEC) */
{ "mr25h128" }, /* 128 Kib, 40 MHz */
{ "mr25h256" }, /* 256 Kib, 40 MHz */
{ "mr25h10" }, /* 1 Mib, 40 MHz */
{ "mr25h40" }, /* 4 Mib, 40 MHz */
{ },
};
MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
static const struct of_device_id spi_nor_of_table[] = {
/*
* Generic compatibility for SPI NOR that can be identified by the
* JEDEC READ ID opcode (0x9F). Use this, if possible.
*/
{ .compatible = "jedec,spi-nor" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spi_nor_of_table);
/*
* REVISIT: many of these chips have deep power-down modes, which
* should clearly be entered on suspend() to minimize power use.
* And also when they're otherwise idle...
*/
static struct spi_mem_driver spi_nor_driver = {
.spidrv = {
.driver = {
.name = "spi-nor",
.of_match_table = spi_nor_of_table,
.dev_groups = spi_nor_sysfs_groups,
},
.id_table = spi_nor_dev_ids,
},
.probe = spi_nor_probe,
.remove = spi_nor_remove,
.shutdown = spi_nor_shutdown,
};
static int __init spi_nor_module_init(void)
{
return spi_mem_driver_register(&spi_nor_driver);
}
module_init(spi_nor_module_init);
static void __exit spi_nor_module_exit(void)
{
spi_mem_driver_unregister(&spi_nor_driver);
spi_nor_debugfs_shutdown();
}
module_exit(spi_nor_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Huang Shijie <[email protected]>");
MODULE_AUTHOR("Mike Lavender");
MODULE_DESCRIPTION("framework for SPI NOR");
| linux-master | drivers/mtd/spi-nor/core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/mtd/spi-nor.h>
#include "core.h"
static const struct flash_info eon_nor_parts[] = {
/* EON -- en25xxx */
{ "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64)
NO_SFDP_FLAGS(SECT_4K) },
{ "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64) },
{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64) },
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128) },
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K) },
{ "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "en25qh16", INFO(0x1c7015, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64) },
{ "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256) },
{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512)
PARSE_SFDP },
{ "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128)
NO_SFDP_FLAGS(SECT_4K) },
};
const struct spi_nor_manufacturer spi_nor_eon = {
.name = "eon",
.parts = eon_nor_parts,
.nparts = ARRAY_SIZE(eon_nor_parts),
};
| linux-master | drivers/mtd/spi-nor/eon.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/mtd/spi-nor.h>
#include "core.h"
#define XILINX_OP_SE 0x50 /* Sector erase */
#define XILINX_OP_PP 0x82 /* Page program */
#define XILINX_OP_RDSR 0xd7 /* Read status register */
#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */
#define XSR_RDY BIT(7) /* Ready */
#define XILINX_RDSR_OP(buf) \
SPI_MEM_OP(SPI_MEM_OP_CMD(XILINX_OP_RDSR, 0), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_IN(1, buf, 0))
#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
.id = { \
((_jedec_id) >> 16) & 0xff, \
((_jedec_id) >> 8) & 0xff, \
(_jedec_id) & 0xff \
}, \
.id_len = 3, \
.sector_size = (8 * (_page_size)), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
.n_banks = 1, \
.addr_nbytes = 3, \
.flags = SPI_NOR_NO_FR
/* Xilinx S3AN flashes share the MFR ID with Atmel SPI NOR. */
static const struct flash_info xilinx_nor_parts[] = {
/* Xilinx S3AN Internal Flash */
{ "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
{ "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
{ "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
{ "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
{ "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
};
/*
* This code converts an address to the Default Address Mode, which has
* non-power-of-two page sizes. We must support this mode because it is the
* default mode supported by Xilinx tools, it can access the whole flash area,
* and changing over to the Power-of-two mode is irreversible and corrupts the
* original data.
* Addr can safely be an unsigned int: the biggest S3AN device is smaller than
* 4 MiB.
*/
static u32 s3an_nor_convert_addr(struct spi_nor *nor, u32 addr)
{
u32 page_size = nor->params->page_size;
u32 offset, page;
offset = addr % page_size;
page = addr / page_size;
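/*
* In Default Address Mode the page number occupies the upper address
* bits: 264-byte pages need 9 offset bits, 528-byte pages need 10.
*/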
page <<= (page_size > 512) ? 10 : 9;
return page | offset;
}
/**
* xilinx_nor_read_sr() - Read the Status Register on S3AN flashes.
* @nor: pointer to 'struct spi_nor'.
* @sr: pointer to a DMA-able buffer where the value of the
* Status Register will be written.
*
* Return: 0 on success, -errno otherwise.
*/
static int xilinx_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
int ret;
if (nor->spimem) {
struct spi_mem_op op = XILINX_RDSR_OP(sr);
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = spi_nor_controller_ops_read_reg(nor, XILINX_OP_RDSR, sr,
1);
}
if (ret)
dev_dbg(nor->dev, "error %d reading SR\n", ret);
return ret;
}
/**
* xilinx_nor_sr_ready() - Query the Status Register of the S3AN flash to see
* if the flash is ready for new commands.
* @nor: pointer to 'struct spi_nor'.
*
* Return: 1 if ready, 0 if not ready, -errno on errors.
*/
static int xilinx_nor_sr_ready(struct spi_nor *nor)
{
int ret;
ret = xilinx_nor_read_sr(nor, nor->bouncebuf);
if (ret)
return ret;
return !!(nor->bouncebuf[0] & XSR_RDY);
}
static int xilinx_nor_setup(struct spi_nor *nor,
const struct spi_nor_hwcaps *hwcaps)
{
u32 page_size;
int ret;
ret = xilinx_nor_read_sr(nor, nor->bouncebuf);
if (ret)
return ret;
nor->erase_opcode = XILINX_OP_SE;
nor->program_opcode = XILINX_OP_PP;
nor->read_opcode = SPINOR_OP_READ;
nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
/*
* These flashes have a page size of 264 or 528 bytes (known as
* Default addressing mode). It can be changed to a more standard
* Power-of-two mode where the page size is 256/512 bytes. This comes
* at a price: there is 3% less space, the data is corrupted,
* and the page size cannot be changed back to Default addressing
* mode.
*
* The current addressing mode can be read from the XRDSR register
* and should not be changed, because that is a destructive operation.
*/
if (nor->bouncebuf[0] & XSR_PAGESIZE) {
/* Flash in Power of 2 mode */
page_size = (nor->params->page_size == 264) ? 256 : 512;
nor->params->page_size = page_size;
nor->mtd.writebufsize = page_size;
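/* S3AN sectors are always 8 pages, hence 8 * page_size * n_sectors. */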
nor->params->size = 8 * page_size * nor->info->n_sectors;
nor->mtd.erasesize = 8 * page_size;
} else {
/* Flash in Default addressing mode */
nor->params->convert_addr = s3an_nor_convert_addr;
nor->mtd.erasesize = nor->info->sector_size;
}
return 0;
}
static int xilinx_nor_late_init(struct spi_nor *nor)
{
nor->params->setup = xilinx_nor_setup;
nor->params->ready = xilinx_nor_sr_ready;
return 0;
}
static const struct spi_nor_fixups xilinx_nor_fixups = {
.late_init = xilinx_nor_late_init,
};
const struct spi_nor_manufacturer spi_nor_xilinx = {
.name = "xilinx",
.parts = xilinx_nor_parts,
.nparts = ARRAY_SIZE(xilinx_nor_parts),
.fixups = &xilinx_nor_fixups,
};
| linux-master | drivers/mtd/spi-nor/xilinx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
#include <linux/mtd/spi-nor.h>
#include "core.h"
static const struct flash_info esmt_nor_parts[] = {
/* ESMT */
{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K) },
{ "f25l32qa-2s", INFO(0x8c4116, 0, 64 * 1024, 64)
FLAGS(SPI_NOR_HAS_LOCK)
NO_SFDP_FLAGS(SECT_4K) },
{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK)
NO_SFDP_FLAGS(SECT_4K) },
};
const struct spi_nor_manufacturer spi_nor_esmt = {
.name = "esmt",
.parts = esmt_nor_parts,
.nparts = ARRAY_SIZE(esmt_nor_parts),
};
| linux-master | drivers/mtd/spi-nor/esmt.c |