// SPDX-License-Identifier: GPL-2.0-only
/*
* MTD map driver for BIOS Flash on Intel SCB2 boards
* Copyright (C) 2002 Sun Microsystems, Inc.
* Tim Hockin <[email protected]>
*
* A few notes on this MTD map:
*
* This was developed with a small number of SCB2 boards to test on.
* Hopefully, Intel has not introduced too many unaccounted variables in the
* making of this board.
*
* The BIOS marks its own memory region as 'reserved' in the e820 map. We
* try to request it here, but if it fails, we carry on anyway.
*
* This is how the chip is attached, so said the schematic:
* * a 4 MiB (32 Mib) 16 bit chip
* * a 1 MiB memory region
* * A20 and A21 pulled up
* * D8-D15 ignored
* What this means is that, while we are addressing bytes linearly, we are
* really addressing words, and discarding the other byte. This means that
* the chip MUST BE at least 2 MiB. This also means that every block is
* actually half as big as the chip reports. It also means that accesses of
* logical address 0 hit higher-address sections of the chip, not physical 0.
* One can only hope that these 4MiB x16 chips were a lot cheaper than 1MiB x8
* chips.
*
* This driver assumes the chip is not write-protected by an external signal.
* As of this writing, that is true, but may change, just to spite me.
*
* The actual BIOS layout has been mostly reverse engineered. Intel BIOS
* updates for this board include 10 related (*.bio - *.bi9) binary files and
* another separate (*.bbo) binary file. The 10 files are 64k of data + a
* small header. If the headers are stripped off, the 10 64k files can be
* concatenated into a 640k image. This is your BIOS image, proper. The
* separate .bbo file also has a small header. It is the 'Boot Block'
* recovery BIOS. Once the header is stripped, no further prep is needed.
* As best I can tell, the BIOS is arranged as such:
* offset 0x00000 to 0x4ffff (320k): unknown - SCSI BIOS, etc?
* offset 0x50000 to 0xeffff (640k): BIOS proper
* offset 0xf0000 to 0xfffff (64k): Boot Block region
*
* Intel's BIOS update program flashes the BIOS and Boot Block in separate
* steps. Probably a wise thing to do.
*/
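/*
 * Illustrative sketch (not part of the original driver): with D8-D15
 * discarded, logical byte N of this map corresponds to 16-bit word N on
 * the chip, i.e. chip byte offset 2*N, with the pulled-up address lines
 * selecting the top half of the part.
 */
#if 0	/* example only, never compiled */
static unsigned long scb2_logical_to_chip_ofs(unsigned long ofs)
{
	/* each logical byte consumes one 16-bit word on the chip */
	return ofs * 2;
}
#endif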
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#define MODNAME "scb2_flash"
#define SCB2_ADDR 0xfff00000
#define SCB2_WINDOW 0x00100000
static void __iomem *scb2_ioaddr;
static struct mtd_info *scb2_mtd;
static struct map_info scb2_map = {
.name = "SCB2 BIOS Flash",
.size = 0,
.bankwidth = 1,
};
static int region_fail;
static int scb2_fixup_mtd(struct mtd_info *mtd)
{
int i;
int done = 0;
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
/* barf if this doesn't look right */
if (cfi->cfiq->InterfaceDesc != CFI_INTERFACE_X16_ASYNC) {
printk(KERN_ERR MODNAME ": unsupported InterfaceDesc: %#x\n",
cfi->cfiq->InterfaceDesc);
return -1;
}
/* I wasn't here. I didn't see. dwmw2. */
/* the chip is sometimes bigger than the map - what a waste */
mtd->size = map->size;
/*
* We only REALLY get half the chip, due to the way it is
* wired up - D8-D15 are tossed away. We read linear bytes,
* but in reality we are getting 1/2 of each 16-bit read,
* which LOOKS linear to us. Because CFI code accounts for
* things like lock/unlock/erase by eraseregions, we need to
* fudge them to reflect this. Erases go like this:
* * send an erase to an address
* * the chip samples the address and erases the block
* * add the block erasesize to the address and repeat
* -- the problem is that addresses are 16-bit addressable
* -- we end up erasing every-other block
*/
mtd->erasesize /= 2;
for (i = 0; i < mtd->numeraseregions; i++) {
struct mtd_erase_region_info *region = &mtd->eraseregions[i];
region->erasesize /= 2;
}
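	/*
	 * Example (illustrative): a 4 MiB x16 part reporting 64 KiB erase
	 * blocks behaves here as 2 MiB of 32 KiB blocks.
	 */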
/*
* If the chip is bigger than the map, it is wired with the high
* address lines pulled up. This makes us access the top portion of
* the chip, so all our erase-region info is wrong. Start cutting from
* the bottom.
*/
for (i = 0; !done && i < mtd->numeraseregions; i++) {
struct mtd_erase_region_info *region = &mtd->eraseregions[i];
if (region->numblocks * region->erasesize > mtd->size) {
region->numblocks = ((unsigned long)mtd->size /
region->erasesize);
done = 1;
} else {
region->numblocks = 0;
}
region->offset = 0;
}
return 0;
}
/* CSB5's 'Function Control Register' has bits for decoding @ >= 0xffc00000 */
#define CSB5_FCR 0x41
#define CSB5_FCR_DECODE_ALL 0x0e
static int scb2_flash_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
u8 reg;
/* enable decoding of the flash region in the south bridge */
pci_read_config_byte(dev, CSB5_FCR, &reg);
pci_write_config_byte(dev, CSB5_FCR, reg | CSB5_FCR_DECODE_ALL);
if (!request_mem_region(SCB2_ADDR, SCB2_WINDOW, scb2_map.name)) {
/*
* The BIOS seems to mark the flash region as 'reserved'
* in the e820 map. Warn and go about our business.
*/
printk(KERN_WARNING MODNAME
": warning - can't reserve rom window, continuing\n");
region_fail = 1;
}
/* remap the IO window (w/o caching) */
scb2_ioaddr = ioremap(SCB2_ADDR, SCB2_WINDOW);
if (!scb2_ioaddr) {
printk(KERN_ERR MODNAME ": Failed to ioremap window!\n");
if (!region_fail)
release_mem_region(SCB2_ADDR, SCB2_WINDOW);
return -ENOMEM;
}
scb2_map.phys = SCB2_ADDR;
scb2_map.virt = scb2_ioaddr;
scb2_map.size = SCB2_WINDOW;
simple_map_init(&scb2_map);
/* try to find a chip */
scb2_mtd = do_map_probe("cfi_probe", &scb2_map);
if (!scb2_mtd) {
printk(KERN_ERR MODNAME ": flash probe failed!\n");
iounmap(scb2_ioaddr);
if (!region_fail)
release_mem_region(SCB2_ADDR, SCB2_WINDOW);
return -ENODEV;
}
scb2_mtd->owner = THIS_MODULE;
if (scb2_fixup_mtd(scb2_mtd) < 0) {
mtd_device_unregister(scb2_mtd);
map_destroy(scb2_mtd);
iounmap(scb2_ioaddr);
if (!region_fail)
release_mem_region(SCB2_ADDR, SCB2_WINDOW);
return -ENODEV;
}
printk(KERN_NOTICE MODNAME ": chip size 0x%llx at offset 0x%llx\n",
(unsigned long long)scb2_mtd->size,
(unsigned long long)(SCB2_WINDOW - scb2_mtd->size));
mtd_device_register(scb2_mtd, NULL, 0);
return 0;
}
static void scb2_flash_remove(struct pci_dev *dev)
{
if (!scb2_mtd)
return;
/* disable flash writes */
mtd_lock(scb2_mtd, 0, scb2_mtd->size);
mtd_device_unregister(scb2_mtd);
map_destroy(scb2_mtd);
iounmap(scb2_ioaddr);
scb2_ioaddr = NULL;
if (!region_fail)
release_mem_region(SCB2_ADDR, SCB2_WINDOW);
}
static struct pci_device_id scb2_flash_pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_SERVERWORKS,
.device = PCI_DEVICE_ID_SERVERWORKS_CSB5,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID
},
{ 0, }
};
static struct pci_driver scb2_flash_driver = {
.name = "Intel SCB2 BIOS Flash",
.id_table = scb2_flash_pci_ids,
.probe = scb2_flash_probe,
.remove = scb2_flash_remove,
};
module_pci_driver(scb2_flash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tim Hockin <[email protected]>");
MODULE_DESCRIPTION("MTD map driver for Intel SCB2 BIOS Flash");
MODULE_DEVICE_TABLE(pci, scb2_flash_pci_ids);
/* linux-master: drivers/mtd/maps/scb2_flash.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/* netsc520.c -- MTD map driver for AMD NetSc520 Demonstration Board
*
* Copyright (C) 2001 Mark Langsdorf ([email protected])
* based on sc520cdp.c by Sysgo Real-Time Solutions GmbH
*
* The NetSc520 is a demonstration board for the Elan Sc520 processor available
* from AMD. It has a single bank of 16 megs of 32-bit Flash ROM and another
* 16 megs of SDRAM.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
/*
** The single, 16 megabyte flash bank is divided into four virtual
** partitions. The first partition is 768 KiB and is intended to
** store the kernel image loaded by the bootstrap loader. The second
** partition is 256 KiB and holds the BIOS image. The third
** partition is 14.5 MiB and is intended for the flash file system
** image. The last partition is 512 KiB and contains another copy
** of the BIOS image and the reset vector.
**
** Only the third partition should be mounted. The first partition
** should not be mounted, but it can be erased and written to using the
** MTD character routines. The second and fourth partitions should
** not be touched - it is possible to corrupt the BIOS image by
** mounting these partitions, and potentially the board will not be
** recoverable afterwards.
*/
/* partition_info gives details on the logical partitions that split the
 * single flash device into. If the size is zero we use up to the end of the
* device. */
static const struct mtd_partition partition_info[] = {
{
.name = "NetSc520 boot kernel",
.offset = 0,
.size = 0xc0000
},
{
.name = "NetSc520 Low BIOS",
.offset = 0xc0000,
.size = 0x40000
},
{
.name = "NetSc520 file system",
.offset = 0x100000,
.size = 0xe80000
},
{
.name = "NetSc520 High BIOS",
.offset = 0xf80000,
.size = 0x80000
},
};
#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
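/*
 * Sanity check (illustrative): 0xc0000 + 0x40000 + 0xe80000 + 0x80000 ==
 * 0x1000000, so the four partitions exactly tile the 16 MiB bank.
 */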
#define WINDOW_SIZE 0x00100000
#define WINDOW_ADDR 0x00200000
static struct map_info netsc520_map = {
.name = "netsc520 Flash Bank",
.size = WINDOW_SIZE,
.bankwidth = 4,
.phys = WINDOW_ADDR,
};
#define NUM_FLASH_BANKS ARRAY_SIZE(netsc520_map)
static struct mtd_info *mymtd;
static int __init init_netsc520(void)
{
printk(KERN_NOTICE "NetSc520 flash device: 0x%Lx at 0x%Lx\n",
(unsigned long long)netsc520_map.size,
(unsigned long long)netsc520_map.phys);
netsc520_map.virt = ioremap(netsc520_map.phys, netsc520_map.size);
if (!netsc520_map.virt) {
printk("Failed to ioremap\n");
return -EIO;
}
simple_map_init(&netsc520_map);
mymtd = do_map_probe("cfi_probe", &netsc520_map);
if(!mymtd)
mymtd = do_map_probe("map_ram", &netsc520_map);
if(!mymtd)
mymtd = do_map_probe("map_rom", &netsc520_map);
if (!mymtd) {
iounmap(netsc520_map.virt);
return -ENXIO;
}
mymtd->owner = THIS_MODULE;
mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
return 0;
}
static void __exit cleanup_netsc520(void)
{
if (mymtd) {
mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (netsc520_map.virt) {
iounmap(netsc520_map.virt);
netsc520_map.virt = NULL;
}
}
module_init(init_netsc520);
module_exit(cleanup_netsc520);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Langsdorf <[email protected]>");
MODULE_DESCRIPTION("MTD map driver for AMD NetSc520 Demonstration Board");
/* linux-master: drivers/mtd/maps/netsc520.c */
// SPDX-License-Identifier: GPL-2.0-only
/* vmu-flash.c
* Driver for SEGA Dreamcast Visual Memory Unit
*
* Copyright (c) Adrian McMenamin 2002 - 2009
* Copyright (c) Paul Mundt 2001
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/maple.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
struct vmu_cache {
unsigned char *buffer; /* Cache */
unsigned int block; /* Which block was cached */
unsigned long jiffies_atc; /* When was it cached? */
int valid;
};
struct mdev_part {
struct maple_device *mdev;
int partition;
};
struct vmupart {
u16 user_blocks;
u16 root_block;
u16 numblocks;
char *name;
struct vmu_cache *pcache;
};
struct memcard {
u16 tempA;
u16 tempB;
u32 partitions;
u32 blocklen;
u32 writecnt;
u32 readcnt;
u32 removable;
int partition;
int read;
unsigned char *blockread;
struct vmupart *parts;
struct mtd_info *mtd;
};
struct vmu_block {
unsigned int num; /* block number */
unsigned int ofs; /* block offset */
};
static struct vmu_block *ofs_to_block(unsigned long src_ofs,
struct mtd_info *mtd, int partition)
{
struct vmu_block *vblock;
struct maple_device *mdev;
struct memcard *card;
struct mdev_part *mpart;
int num;
mpart = mtd->priv;
mdev = mpart->mdev;
card = maple_get_drvdata(mdev);
if (src_ofs >= card->parts[partition].numblocks * card->blocklen)
goto failed;
num = src_ofs / card->blocklen;
if (num > card->parts[partition].numblocks)
goto failed;
vblock = kmalloc(sizeof(struct vmu_block), GFP_KERNEL);
if (!vblock)
goto failed;
vblock->num = num;
vblock->ofs = src_ofs % card->blocklen;
return vblock;
failed:
return NULL;
}
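/*
 * Example (illustrative): with a 512-byte blocklen, src_ofs 1234 resolves
 * to block 2, offset 210 (1234 == 2 * 512 + 210).
 */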
/* Maple bus callback function for reads */
static void vmu_blockread(struct mapleq *mq)
{
struct maple_device *mdev;
struct memcard *card;
mdev = mq->dev;
card = maple_get_drvdata(mdev);
/* copy the read in data */
if (unlikely(!card->blockread))
return;
memcpy(card->blockread, mq->recvbuf->buf + 12,
card->blocklen/card->readcnt);
}
/* Interface with maple bus to read blocks
* caching the results so that other parts
* of the driver can access block reads */
static int maple_vmu_read_block(unsigned int num, unsigned char *buf,
struct mtd_info *mtd)
{
struct memcard *card;
struct mdev_part *mpart;
struct maple_device *mdev;
int partition, error = 0, x, wait;
unsigned char *blockread = NULL;
struct vmu_cache *pcache;
__be32 sendbuf;
mpart = mtd->priv;
mdev = mpart->mdev;
partition = mpart->partition;
card = maple_get_drvdata(mdev);
pcache = card->parts[partition].pcache;
pcache->valid = 0;
/* prepare the cache for this block */
if (!pcache->buffer) {
pcache->buffer = kmalloc(card->blocklen, GFP_KERNEL);
if (!pcache->buffer) {
dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due"
" to lack of memory\n", mdev->port,
mdev->unit);
error = -ENOMEM;
goto outB;
}
}
/*
* Reads may be phased - again the hardware spec
* supports this - though there may not be any devices in
* the wild that implement it, but we handle it here
*/
for (x = 0; x < card->readcnt; x++) {
sendbuf = cpu_to_be32(partition << 24 | x << 16 | num);
if (atomic_read(&mdev->busy) == 1) {
wait_event_interruptible_timeout(mdev->maple_wait,
atomic_read(&mdev->busy) == 0, HZ);
if (atomic_read(&mdev->busy) == 1) {
dev_notice(&mdev->dev, "VMU at (%d, %d)"
" is busy\n", mdev->port, mdev->unit);
error = -EAGAIN;
goto outB;
}
}
atomic_set(&mdev->busy, 1);
blockread = kmalloc(card->blocklen/card->readcnt, GFP_KERNEL);
if (!blockread) {
error = -ENOMEM;
atomic_set(&mdev->busy, 0);
goto outB;
}
card->blockread = blockread;
maple_getcond_callback(mdev, vmu_blockread, 0,
MAPLE_FUNC_MEMCARD);
error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
MAPLE_COMMAND_BREAD, 2, &sendbuf);
/* Very long timeouts seem to be needed when box is stressed */
wait = wait_event_interruptible_timeout(mdev->maple_wait,
(atomic_read(&mdev->busy) == 0 ||
atomic_read(&mdev->busy) == 2), HZ * 3);
/*
* MTD layer does not handle hotplugging well
* so have to return errors when VMU is unplugged
* in the middle of a read (busy == 2)
*/
if (error || atomic_read(&mdev->busy) == 2) {
if (atomic_read(&mdev->busy) == 2)
error = -ENXIO;
atomic_set(&mdev->busy, 0);
card->blockread = NULL;
goto outA;
}
if (wait == 0 || wait == -ERESTARTSYS) {
card->blockread = NULL;
atomic_set(&mdev->busy, 0);
error = -EIO;
list_del_init(&(mdev->mq->list));
kfree(mdev->mq->sendbuf);
mdev->mq->sendbuf = NULL;
if (wait == -ERESTARTSYS) {
dev_warn(&mdev->dev, "VMU read on (%d, %d)"
" interrupted on block 0x%X\n",
mdev->port, mdev->unit, num);
} else
dev_notice(&mdev->dev, "VMU read on (%d, %d)"
" timed out on block 0x%X\n",
mdev->port, mdev->unit, num);
goto outA;
}
memcpy(buf + (card->blocklen/card->readcnt) * x, blockread,
card->blocklen/card->readcnt);
memcpy(pcache->buffer + (card->blocklen/card->readcnt) * x,
card->blockread, card->blocklen/card->readcnt);
card->blockread = NULL;
pcache->block = num;
pcache->jiffies_atc = jiffies;
pcache->valid = 1;
kfree(blockread);
}
return error;
outA:
kfree(blockread);
outB:
return error;
}
/* communicate with maple bus for phased writing */
static int maple_vmu_write_block(unsigned int num, const unsigned char *buf,
struct mtd_info *mtd)
{
struct memcard *card;
struct mdev_part *mpart;
struct maple_device *mdev;
int partition, error, locking, x, phaselen, wait;
__be32 *sendbuf;
mpart = mtd->priv;
mdev = mpart->mdev;
partition = mpart->partition;
card = maple_get_drvdata(mdev);
phaselen = card->blocklen/card->writecnt;
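	/*
	 * Example (illustrative): a 512-byte block written in 4 phases
	 * (writecnt == 4) goes out as four BWRITE packets carrying 128
	 * bytes each, each prefixed by a partition/phase/block header word.
	 */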
sendbuf = kmalloc(phaselen + 4, GFP_KERNEL);
if (!sendbuf) {
error = -ENOMEM;
goto fail_nosendbuf;
}
for (x = 0; x < card->writecnt; x++) {
sendbuf[0] = cpu_to_be32(partition << 24 | x << 16 | num);
memcpy(&sendbuf[1], buf + phaselen * x, phaselen);
/* wait until the device is not busy doing something else
* or 1 second - whichever comes first */
if (atomic_read(&mdev->busy) == 1) {
wait_event_interruptible_timeout(mdev->maple_wait,
atomic_read(&mdev->busy) == 0, HZ);
if (atomic_read(&mdev->busy) == 1) {
error = -EBUSY;
dev_notice(&mdev->dev, "VMU write at (%d, %d)"
"failed - device is busy\n",
mdev->port, mdev->unit);
goto fail_nolock;
}
}
atomic_set(&mdev->busy, 1);
locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
MAPLE_COMMAND_BWRITE, phaselen / 4 + 2, sendbuf);
wait = wait_event_interruptible_timeout(mdev->maple_wait,
atomic_read(&mdev->busy) == 0, HZ/10);
if (locking) {
error = -EIO;
atomic_set(&mdev->busy, 0);
goto fail_nolock;
}
if (atomic_read(&mdev->busy) == 2) {
atomic_set(&mdev->busy, 0);
} else if (wait == 0 || wait == -ERESTARTSYS) {
error = -EIO;
dev_warn(&mdev->dev, "Write at (%d, %d) of block"
" 0x%X at phase %d failed: could not"
" communicate with VMU", mdev->port,
mdev->unit, num, x);
atomic_set(&mdev->busy, 0);
kfree(mdev->mq->sendbuf);
mdev->mq->sendbuf = NULL;
list_del_init(&(mdev->mq->list));
goto fail_nolock;
}
}
kfree(sendbuf);
return card->blocklen;
fail_nolock:
kfree(sendbuf);
fail_nosendbuf:
dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port,
mdev->unit);
return error;
}
/* mtd function to simulate reading byte by byte */
static unsigned char vmu_flash_read_char(unsigned long ofs, int *retval,
struct mtd_info *mtd)
{
struct vmu_block *vblock;
struct memcard *card;
struct mdev_part *mpart;
struct maple_device *mdev;
unsigned char *buf, ret;
int partition, error;
mpart = mtd->priv;
mdev = mpart->mdev;
partition = mpart->partition;
card = maple_get_drvdata(mdev);
*retval = 0;
buf = kmalloc(card->blocklen, GFP_KERNEL);
if (!buf) {
*retval = 1;
ret = -ENOMEM;
goto finish;
}
vblock = ofs_to_block(ofs, mtd, partition);
if (!vblock) {
*retval = 3;
ret = -ENOMEM;
goto out_buf;
}
error = maple_vmu_read_block(vblock->num, buf, mtd);
if (error) {
ret = error;
*retval = 2;
goto out_vblock;
}
ret = buf[vblock->ofs];
out_vblock:
kfree(vblock);
out_buf:
kfree(buf);
finish:
return ret;
}
/* mtd higher order function to read flash */
static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct maple_device *mdev;
struct memcard *card;
struct mdev_part *mpart;
struct vmu_cache *pcache;
struct vmu_block *vblock;
int index = 0, retval, partition, leftover, numblocks;
unsigned char cx;
mpart = mtd->priv;
mdev = mpart->mdev;
partition = mpart->partition;
card = maple_get_drvdata(mdev);
numblocks = card->parts[partition].numblocks;
if (from + len > numblocks * card->blocklen)
len = numblocks * card->blocklen - from;
if (len == 0)
return -EIO;
/* Have we cached this bit already? */
pcache = card->parts[partition].pcache;
do {
vblock = ofs_to_block(from + index, mtd, partition);
if (!vblock)
return -ENOMEM;
/* Have we cached this and is the cache valid and timely? */
if (pcache->valid &&
time_before(jiffies, pcache->jiffies_atc + HZ) &&
(pcache->block == vblock->num)) {
/* we have cached it, so do necessary copying */
leftover = card->blocklen - vblock->ofs;
if (vblock->ofs + len - index < card->blocklen) {
/* only a bit of this block to copy */
memcpy(buf + index,
pcache->buffer + vblock->ofs,
len - index);
index = len;
} else {
/* otherwise copy remainder of whole block */
memcpy(buf + index, pcache->buffer +
vblock->ofs, leftover);
index += leftover;
}
} else {
/*
* Not cached so read one byte -
* but cache the rest of the block
*/
cx = vmu_flash_read_char(from + index, &retval, mtd);
if (retval) {
*retlen = index;
kfree(vblock);
return cx;
}
memset(buf + index, cx, 1);
index++;
}
kfree(vblock);
} while (len > index);
*retlen = index;
return 0;
}
static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct maple_device *mdev;
struct memcard *card;
struct mdev_part *mpart;
int index = 0, partition, error = 0, numblocks;
struct vmu_cache *pcache;
struct vmu_block *vblock;
unsigned char *buffer;
mpart = mtd->priv;
mdev = mpart->mdev;
partition = mpart->partition;
card = maple_get_drvdata(mdev);
numblocks = card->parts[partition].numblocks;
if (to + len > numblocks * card->blocklen)
len = numblocks * card->blocklen - to;
if (len == 0) {
error = -EIO;
goto failed;
}
vblock = ofs_to_block(to, mtd, partition);
if (!vblock) {
error = -ENOMEM;
goto failed;
}
buffer = kmalloc(card->blocklen, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto fail_buffer;
}
do {
/* Read in the block we are to write to */
error = maple_vmu_read_block(vblock->num, buffer, mtd);
if (error)
goto fail_io;
do {
buffer[vblock->ofs] = buf[index];
vblock->ofs++;
index++;
if (index >= len)
break;
} while (vblock->ofs < card->blocklen);
/* write out new buffer */
error = maple_vmu_write_block(vblock->num, buffer, mtd);
/* invalidate the cache */
pcache = card->parts[partition].pcache;
pcache->valid = 0;
if (error != card->blocklen)
goto fail_io;
vblock->num++;
vblock->ofs = 0;
} while (len > index);
kfree(buffer);
*retlen = index;
kfree(vblock);
return 0;
fail_io:
kfree(buffer);
fail_buffer:
kfree(vblock);
failed:
dev_err(&mdev->dev, "VMU write failing with error %d\n", error);
return error;
}
static void vmu_flash_sync(struct mtd_info *mtd)
{
/* Do nothing here */
}
/* Maple bus callback function to recursively query hardware details */
static void vmu_queryblocks(struct mapleq *mq)
{
struct maple_device *mdev;
unsigned short *res;
struct memcard *card;
__be32 partnum;
struct vmu_cache *pcache;
struct mdev_part *mpart;
struct mtd_info *mtd_cur;
struct vmupart *part_cur;
int error;
mdev = mq->dev;
card = maple_get_drvdata(mdev);
res = (unsigned short *) (mq->recvbuf->buf);
card->tempA = res[12];
card->tempB = res[6];
dev_info(&mdev->dev, "VMU device at partition %d has %d user "
"blocks with a root block at %d\n", card->partition,
card->tempA, card->tempB);
part_cur = &card->parts[card->partition];
part_cur->user_blocks = card->tempA;
part_cur->root_block = card->tempB;
part_cur->numblocks = card->tempB + 1;
part_cur->name = kmalloc(12, GFP_KERNEL);
if (!part_cur->name)
goto fail_name;
sprintf(part_cur->name, "vmu%d.%d.%d",
mdev->port, mdev->unit, card->partition);
mtd_cur = &card->mtd[card->partition];
mtd_cur->name = part_cur->name;
mtd_cur->type = 8;
mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
mtd_cur->size = part_cur->numblocks * card->blocklen;
mtd_cur->erasesize = card->blocklen;
mtd_cur->_write = vmu_flash_write;
mtd_cur->_read = vmu_flash_read;
mtd_cur->_sync = vmu_flash_sync;
mtd_cur->writesize = card->blocklen;
mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
if (!mpart)
goto fail_mpart;
mpart->mdev = mdev;
mpart->partition = card->partition;
mtd_cur->priv = mpart;
mtd_cur->owner = THIS_MODULE;
pcache = kzalloc(sizeof(struct vmu_cache), GFP_KERNEL);
if (!pcache)
goto fail_cache_create;
part_cur->pcache = pcache;
error = mtd_device_register(mtd_cur, NULL, 0);
if (error)
goto fail_mtd_register;
maple_getcond_callback(mdev, NULL, 0,
MAPLE_FUNC_MEMCARD);
/*
* Set up a recursive call to the (probably theoretical)
* second or more partition
*/
if (++card->partition < card->partitions) {
partnum = cpu_to_be32(card->partition << 24);
maple_getcond_callback(mdev, vmu_queryblocks, 0,
MAPLE_FUNC_MEMCARD);
maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
MAPLE_COMMAND_GETMINFO, 2, &partnum);
}
return;
fail_mtd_register:
dev_err(&mdev->dev, "Could not register maple device at (%d, %d)"
"error is 0x%X\n", mdev->port, mdev->unit, error);
for (error = 0; error <= card->partition; error++) {
kfree(((card->parts)[error]).pcache);
((card->parts)[error]).pcache = NULL;
}
fail_cache_create:
fail_mpart:
for (error = 0; error <= card->partition; error++) {
kfree(((card->mtd)[error]).priv);
((card->mtd)[error]).priv = NULL;
}
maple_getcond_callback(mdev, NULL, 0,
MAPLE_FUNC_MEMCARD);
kfree(part_cur->name);
fail_name:
return;
}
/* Handles very basic info about the flash, queries for details */
static int vmu_connect(struct maple_device *mdev)
{
unsigned long test_flash_data, basic_flash_data;
int c, error;
struct memcard *card;
u32 partnum = 0;
test_flash_data = be32_to_cpu(mdev->devinfo.function);
/* Need to count how many bits are set - to find out which
* function_data element has details of the memory card
*/
c = hweight_long(test_flash_data);
basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);
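	/*
	 * Example (illustrative): if the device advertises three functions
	 * (three bits set in devinfo.function), c == 3 and the card details
	 * are taken from function_data[2].
	 */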
card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
if (!card) {
error = -ENOMEM;
goto fail_nomem;
}
card->partitions = (basic_flash_data >> 24 & 0xFF) + 1;
card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5;
card->writecnt = basic_flash_data >> 12 & 0xF;
card->readcnt = basic_flash_data >> 8 & 0xF;
card->removable = basic_flash_data >> 7 & 1;
card->partition = 0;
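	/*
	 * Illustrative decode, assuming a typical VMU: a blocklen field
	 * (bits 23:16) of 0x0F yields ((0x0F + 1) << 5) == 512-byte blocks,
	 * and a partitions field (bits 31:24) of 0 yields one partition.
	 */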
/*
* Not sure there are actually any multi-partition devices in the
* real world, but the hardware supports them, so, so will we
*/
card->parts = kmalloc_array(card->partitions, sizeof(struct vmupart),
GFP_KERNEL);
if (!card->parts) {
error = -ENOMEM;
goto fail_partitions;
}
card->mtd = kmalloc_array(card->partitions, sizeof(struct mtd_info),
GFP_KERNEL);
if (!card->mtd) {
error = -ENOMEM;
goto fail_mtd_info;
}
maple_set_drvdata(mdev, card);
/*
* We want to trap meminfo not get cond
* so set interval to zero, but rely on maple bus
* driver to pass back the results of the meminfo
*/
maple_getcond_callback(mdev, vmu_queryblocks, 0,
MAPLE_FUNC_MEMCARD);
/* Make sure we are clear to go */
if (atomic_read(&mdev->busy) == 1) {
wait_event_interruptible_timeout(mdev->maple_wait,
atomic_read(&mdev->busy) == 0, HZ);
if (atomic_read(&mdev->busy) == 1) {
dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n",
mdev->port, mdev->unit);
error = -EAGAIN;
goto fail_device_busy;
}
}
atomic_set(&mdev->busy, 1);
/*
* Set up the minfo call: vmu_queryblocks will handle
* the information passed back
*/
error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
MAPLE_COMMAND_GETMINFO, 2, &partnum);
if (error) {
dev_err(&mdev->dev, "Could not lock VMU at (%d, %d)"
" error is 0x%X\n", mdev->port, mdev->unit, error);
goto fail_mtd_info;
}
return 0;
fail_device_busy:
kfree(card->mtd);
fail_mtd_info:
kfree(card->parts);
fail_partitions:
kfree(card);
fail_nomem:
return error;
}
static void vmu_disconnect(struct maple_device *mdev)
{
struct memcard *card;
struct mdev_part *mpart;
int x;
mdev->callback = NULL;
card = maple_get_drvdata(mdev);
for (x = 0; x < card->partitions; x++) {
mpart = ((card->mtd)[x]).priv;
mpart->mdev = NULL;
mtd_device_unregister(&((card->mtd)[x]));
kfree(((card->parts)[x]).name);
}
kfree(card->parts);
kfree(card->mtd);
kfree(card);
}
/* Callback to handle eccentricities of both mtd subsystem
* and general flakiness of Dreamcast VMUs
*/
static int vmu_can_unload(struct maple_device *mdev)
{
struct memcard *card;
int x;
struct mtd_info *mtd;
card = maple_get_drvdata(mdev);
for (x = 0; x < card->partitions; x++) {
mtd = &((card->mtd)[x]);
if (mtd->usecount > 0)
return 0;
}
return 1;
}
#define ERRSTR "VMU at (%d, %d) file error -"
static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
{
enum maple_file_errors error = ((int *)recvbuf)[1];
switch (error) {
case MAPLE_FILEERR_INVALID_PARTITION:
dev_notice(&mdev->dev, ERRSTR " invalid partition number\n",
mdev->port, mdev->unit);
break;
case MAPLE_FILEERR_PHASE_ERROR:
dev_notice(&mdev->dev, ERRSTR " phase error\n",
mdev->port, mdev->unit);
break;
case MAPLE_FILEERR_INVALID_BLOCK:
dev_notice(&mdev->dev, ERRSTR " invalid block number\n",
mdev->port, mdev->unit);
break;
case MAPLE_FILEERR_WRITE_ERROR:
dev_notice(&mdev->dev, ERRSTR " write error\n",
mdev->port, mdev->unit);
break;
case MAPLE_FILEERR_INVALID_WRITE_LENGTH:
dev_notice(&mdev->dev, ERRSTR " invalid write length\n",
mdev->port, mdev->unit);
break;
case MAPLE_FILEERR_BAD_CRC:
dev_notice(&mdev->dev, ERRSTR " bad CRC\n",
mdev->port, mdev->unit);
break;
default:
dev_notice(&mdev->dev, ERRSTR " 0x%X\n",
mdev->port, mdev->unit, error);
}
}
static int probe_maple_vmu(struct device *dev)
{
struct maple_device *mdev = to_maple_dev(dev);
struct maple_driver *mdrv = to_maple_driver(dev->driver);
mdev->can_unload = vmu_can_unload;
mdev->fileerr_handler = vmu_file_error;
mdev->driver = mdrv;
return vmu_connect(mdev);
}
static int remove_maple_vmu(struct device *dev)
{
struct maple_device *mdev = to_maple_dev(dev);
vmu_disconnect(mdev);
return 0;
}
static struct maple_driver vmu_flash_driver = {
.function = MAPLE_FUNC_MEMCARD,
.drv = {
.name = "Dreamcast_visual_memory",
.probe = probe_maple_vmu,
.remove = remove_maple_vmu,
},
};
static int __init vmu_flash_map_init(void)
{
return maple_driver_register(&vmu_flash_driver);
}
static void __exit vmu_flash_map_exit(void)
{
maple_driver_unregister(&vmu_flash_driver);
}
module_init(vmu_flash_map_init);
module_exit(vmu_flash_map_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian McMenamin");
MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory");
/* linux-master: drivers/mtd/maps/vmu-flash.c */
/****************************************************************************/
/*
* uclinux.c -- generic memory mapped MTD driver for uclinux
*
* (C) Copyright 2002, Greg Ungerer ([email protected])
*
* License: GPL
*/
/****************************************************************************/
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/major.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
#include <asm/sections.h>
/****************************************************************************/
#ifdef CONFIG_MTD_ROM
#define MAP_NAME "rom"
#else
#define MAP_NAME "ram"
#endif
static struct map_info uclinux_ram_map = {
.name = MAP_NAME,
.size = 0,
};
static unsigned long physaddr = -1;
module_param(physaddr, ulong, S_IRUGO);
static struct mtd_info *uclinux_ram_mtdinfo;
/****************************************************************************/
static const struct mtd_partition uclinux_romfs[] = {
{ .name = "ROMfs" }
};
#define NUM_PARTITIONS ARRAY_SIZE(uclinux_romfs)
/****************************************************************************/
static int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
*virt = map->virt + from;
if (phys)
*phys = map->phys + from;
*retlen = len;
return(0);
}
/****************************************************************************/
static int __init uclinux_mtd_init(void)
{
struct mtd_info *mtd;
struct map_info *mapp;
mapp = &uclinux_ram_map;
if (physaddr == -1)
mapp->phys = (resource_size_t)__bss_stop;
else
mapp->phys = physaddr;
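	/*
	 * No size given, so read it from the filesystem image itself:
	 * bytes 8-11 of a ROMFS image hold the full image size in network
	 * (big-endian) byte order, hence the ntohl() below.
	 */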
if (!mapp->size)
mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(mapp->phys + 8))));
mapp->bankwidth = 4;
printk("uclinux[mtd]: probe address=0x%x size=0x%x\n",
(int) mapp->phys, (int) mapp->size);
/*
* The filesystem is guaranteed to be in direct mapped memory. It is
* directly following the kernel's own bss region. Following the same
* mechanism used by architectures setting up traditional initrds we
* use phys_to_virt to get the virtual address of its start.
*/
mapp->virt = phys_to_virt(mapp->phys);
if (mapp->virt == 0) {
printk("uclinux[mtd]: no virtual mapping?\n");
return(-EIO);
}
simple_map_init(mapp);
mtd = do_map_probe("map_" MAP_NAME, mapp);
if (!mtd) {
printk("uclinux[mtd]: failed to find a mapping?\n");
return(-ENXIO);
}
mtd->owner = THIS_MODULE;
mtd->_point = uclinux_point;
mtd->priv = mapp;
uclinux_ram_mtdinfo = mtd;
mtd_device_register(mtd, uclinux_romfs, NUM_PARTITIONS);
return(0);
}
device_initcall(uclinux_mtd_init);
/****************************************************************************/
/* linux-master: drivers/mtd/maps/uclinux.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
* esb2rom.c
*
* Normal mappings of flash chips in physical memory
* through the Intel ESB2 Southbridge.
*
* This was derived from ichxrom.c in May 2006 by
* Lew Glendenning <[email protected]>
*
* Eric Biederman, of course, was a major help in this effort.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/flashchip.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/list.h>
#define MOD_NAME KBUILD_BASENAME
#define ADDRESS_NAME_LEN 18
#define ROM_PROBE_STEP_SIZE (64*1024) /* 64KiB */
#define BIOS_CNTL 0xDC
#define BIOS_LOCK_ENABLE 0x02
#define BIOS_WRITE_ENABLE 0x01
/* This became a 16-bit register, and EN2 has disappeared */
#define FWH_DEC_EN1 0xD8
#define FWH_F8_EN 0x8000
#define FWH_F0_EN 0x4000
#define FWH_E8_EN 0x2000
#define FWH_E0_EN 0x1000
#define FWH_D8_EN 0x0800
#define FWH_D0_EN 0x0400
#define FWH_C8_EN 0x0200
#define FWH_C0_EN 0x0100
#define FWH_LEGACY_F_EN 0x0080
#define FWH_LEGACY_E_EN 0x0040
/* reserved 0x0020 and 0x0010 */
#define FWH_70_EN 0x0008
#define FWH_60_EN 0x0004
#define FWH_50_EN 0x0002
#define FWH_40_EN 0x0001
/* these are 32-bit values */
#define FWH_SEL1 0xD0
#define FWH_SEL2 0xD4
#define FWH_8MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN | \
FWH_70_EN | FWH_60_EN | FWH_50_EN | FWH_40_EN)
#define FWH_7MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN | \
FWH_70_EN | FWH_60_EN | FWH_50_EN)
#define FWH_6MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN | \
FWH_70_EN | FWH_60_EN)
#define FWH_5MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN | \
FWH_70_EN)
#define FWH_4MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
FWH_D8_EN | FWH_D0_EN | FWH_C8_EN | FWH_C0_EN)
#define FWH_3_5MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
FWH_D8_EN | FWH_D0_EN | FWH_C8_EN)
#define FWH_3MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
FWH_D8_EN | FWH_D0_EN)
#define FWH_2_5MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN | \
FWH_D8_EN)
#define FWH_2MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN | FWH_E0_EN)
#define FWH_1_5MiB (FWH_F8_EN | FWH_F0_EN | FWH_E8_EN)
#define FWH_1MiB (FWH_F8_EN | FWH_F0_EN)
#define FWH_0_5MiB (FWH_F8_EN)
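/*
 * Worked example (illustrative): FWH_1MiB == FWH_F8_EN | FWH_F0_EN, i.e.
 * the two topmost 512 KiB decode ranges are enabled, giving a contiguous
 * 1 MiB window that ends at 4 GiB; the probe below picks base 0xfff00000.
 */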
struct esb2rom_window {
void __iomem* virt;
unsigned long phys;
unsigned long size;
struct list_head maps;
struct resource rsrc;
struct pci_dev *pdev;
};
struct esb2rom_map_info {
struct list_head list;
struct map_info map;
struct mtd_info *mtd;
struct resource rsrc;
char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
};
static struct esb2rom_window esb2rom_window = {
.maps = LIST_HEAD_INIT(esb2rom_window.maps),
};
static void esb2rom_cleanup(struct esb2rom_window *window)
{
struct esb2rom_map_info *map, *scratch;
u8 byte;
/* Disable writes through the rom window */
pci_read_config_byte(window->pdev, BIOS_CNTL, &byte);
pci_write_config_byte(window->pdev, BIOS_CNTL,
byte & ~BIOS_WRITE_ENABLE);
/* Free all of the mtd devices */
list_for_each_entry_safe(map, scratch, &window->maps, list) {
if (map->rsrc.parent)
release_resource(&map->rsrc);
mtd_device_unregister(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
}
if (window->rsrc.parent)
release_resource(&window->rsrc);
if (window->virt) {
iounmap(window->virt);
window->virt = NULL;
window->phys = 0;
window->size = 0;
}
pci_dev_put(window->pdev);
}
static int __init esb2rom_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
struct esb2rom_window *window = &esb2rom_window;
struct esb2rom_map_info *map = NULL;
unsigned long map_top;
u8 byte;
u16 word;
/* For now I just handle the esb2 and I assume there
 * are not a lot of resources up at the top of the address
 * space. It is possible to handle other devices in the
 * top 16MiB but it is very painful. Also since
 * you can only really attach a FWH to an ICHX there are
 * a number of simplifications you can make.
*
* Also you can page firmware hubs if an 8MiB window isn't enough
* but don't currently handle that case either.
*/
window->pdev = pci_dev_get(pdev);
/* RLG: experiment 2. Force the window registers to the widest values */
/*
pci_read_config_word(pdev, FWH_DEC_EN1, &word);
printk(KERN_DEBUG "Original FWH_DEC_EN1 : %x\n", word);
pci_write_config_byte(pdev, FWH_DEC_EN1, 0xff);
pci_read_config_byte(pdev, FWH_DEC_EN1, &byte);
printk(KERN_DEBUG "New FWH_DEC_EN1 : %x\n", byte);
pci_read_config_byte(pdev, FWH_DEC_EN2, &byte);
printk(KERN_DEBUG "Original FWH_DEC_EN2 : %x\n", byte);
pci_write_config_byte(pdev, FWH_DEC_EN2, 0x0f);
pci_read_config_byte(pdev, FWH_DEC_EN2, &byte);
printk(KERN_DEBUG "New FWH_DEC_EN2 : %x\n", byte);
*/
/* Find a region contiguous with the end of the ROM window */
window->phys = 0;
pci_read_config_word(pdev, FWH_DEC_EN1, &word);
printk(KERN_DEBUG "pci_read_config_word : %x\n", word);
if ((word & FWH_8MiB) == FWH_8MiB)
window->phys = 0xff400000;
else if ((word & FWH_7MiB) == FWH_7MiB)
window->phys = 0xff500000;
else if ((word & FWH_6MiB) == FWH_6MiB)
window->phys = 0xff600000;
else if ((word & FWH_5MiB) == FWH_5MiB)
window->phys = 0xFF700000;
else if ((word & FWH_4MiB) == FWH_4MiB)
window->phys = 0xffc00000;
else if ((word & FWH_3_5MiB) == FWH_3_5MiB)
window->phys = 0xffc80000;
else if ((word & FWH_3MiB) == FWH_3MiB)
window->phys = 0xffd00000;
else if ((word & FWH_2_5MiB) == FWH_2_5MiB)
window->phys = 0xffd80000;
else if ((word & FWH_2MiB) == FWH_2MiB)
window->phys = 0xffe00000;
else if ((word & FWH_1_5MiB) == FWH_1_5MiB)
window->phys = 0xffe80000;
else if ((word & FWH_1MiB) == FWH_1MiB)
window->phys = 0xfff00000;
else if ((word & FWH_0_5MiB) == FWH_0_5MiB)
window->phys = 0xfff80000;
if (window->phys == 0) {
printk(KERN_ERR MOD_NAME ": Rom window is closed\n");
goto out;
}
window->phys -= 0x400000UL;
window->size = (0xffffffffUL - window->phys) + 1UL;
/* Enable writes through the rom window */
pci_read_config_byte(pdev, BIOS_CNTL, &byte);
if (!(byte & BIOS_WRITE_ENABLE) && (byte & (BIOS_LOCK_ENABLE))) {
/* The BIOS will generate an error if I enable
* this device, so don't even try.
*/
printk(KERN_ERR MOD_NAME ": firmware access control, I can't enable writes\n");
goto out;
}
pci_write_config_byte(pdev, BIOS_CNTL, byte | BIOS_WRITE_ENABLE);
/*
* Try to reserve the window mem region. If this fails then
* it is likely due to the window being "reserved" by the BIOS.
*/
window->rsrc.name = MOD_NAME;
window->rsrc.start = window->phys;
window->rsrc.end = window->phys + window->size - 1;
window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, &window->rsrc)) {
window->rsrc.parent = NULL;
printk(KERN_DEBUG MOD_NAME ": "
"%s(): Unable to register resource %pR - kernel bug?\n",
__func__, &window->rsrc);
}
/* Map the firmware hub into my address space. */
window->virt = ioremap(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
goto out;
}
/* Get the first address to look for an rom chip at */
map_top = window->phys;
if ((window->phys & 0x3fffff) != 0) {
/* if not aligned on 4MiB, look 4MiB lower in address space */
map_top = window->phys + 0x400000;
}
#if 1
/* The probe sequence run over the firmware hub lock
* registers sets them to 0x7 (no access).
* (Insane hardware design, but most copied Intel's.)
* ==> Probe at most the last 4M of the address space.
*/
if (map_top < 0xffc00000)
map_top = 0xffc00000;
#endif
/* Loop through and look for rom chips */
while ((map_top - 1) < 0xffffffffUL) {
struct cfi_private *cfi;
unsigned long offset;
int i;
if (!map) {
map = kmalloc(sizeof(*map), GFP_KERNEL);
if (!map)
goto out;
}
memset(map, 0, sizeof(*map));
INIT_LIST_HEAD(&map->list);
map->map.name = map->map_name;
map->map.phys = map_top;
offset = map_top - window->phys;
map->map.virt = (void __iomem *)
(((unsigned long)(window->virt)) + offset);
map->map.size = 0xffffffffUL - map_top + 1UL;
/* Set the name of the map to the address I am trying */
sprintf(map->map_name, "%s @%08Lx",
MOD_NAME, (unsigned long long)map->map.phys);
/* Firmware hubs only use vpp when being programmed
* in a factory setting. So in-place programming
* needs to use a different method.
*/
for(map->map.bankwidth = 32; map->map.bankwidth;
map->map.bankwidth >>= 1) {
char **probe_type;
/* Skip bankwidths that are not supported */
if (!map_bankwidth_supported(map->map.bankwidth))
continue;
/* Setup the map methods */
simple_map_init(&map->map);
/* Try all of the probe methods */
probe_type = rom_probe_types;
for(; *probe_type; probe_type++) {
map->mtd = do_map_probe(*probe_type, &map->map);
if (map->mtd)
goto found;
}
}
map_top += ROM_PROBE_STEP_SIZE;
continue;
found:
/* Trim the size if we are larger than the map */
if (map->mtd->size > map->map.size) {
printk(KERN_WARNING MOD_NAME
" rom(%llu) larger than window(%lu). fixing...\n",
(unsigned long long)map->mtd->size, map->map.size);
map->mtd->size = map->map.size;
}
if (window->rsrc.parent) {
/*
* Registering the MTD device in iomem may not be possible
* if there is a BIOS "reserved" and BUSY range. If this
* fails then continue anyway.
*/
map->rsrc.name = map->map_name;
map->rsrc.start = map->map.phys;
map->rsrc.end = map->map.phys + map->mtd->size - 1;
map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&window->rsrc, &map->rsrc)) {
printk(KERN_ERR MOD_NAME
": cannot reserve MTD resource\n");
map->rsrc.parent = NULL;
}
}
/* Make the whole region visible in the map */
map->map.virt = window->virt;
map->map.phys = window->phys;
cfi = map->map.fldrv_priv;
for(i = 0; i < cfi->numchips; i++)
cfi->chips[i].start += offset;
/* Now that the mtd devices is complete claim and export it */
map->mtd->owner = THIS_MODULE;
if (mtd_device_register(map->mtd, NULL, 0)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
}
/* Calculate the new value of map_top */
map_top += map->mtd->size;
/* File away the map structure */
list_add(&map->list, &window->maps);
map = NULL;
}
out:
/* Free any left over map structures */
kfree(map);
/* See if I have any map structures */
if (list_empty(&window->maps)) {
esb2rom_cleanup(window);
return -ENODEV;
}
return 0;
}
static void esb2rom_remove_one(struct pci_dev *pdev)
{
struct esb2rom_window *window = &esb2rom_window;
esb2rom_cleanup(window);
}
static const struct pci_device_id esb2rom_pci_tbl[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0, },
};
#if 0
MODULE_DEVICE_TABLE(pci, esb2rom_pci_tbl);
static struct pci_driver esb2rom_driver = {
.name = MOD_NAME,
.id_table = esb2rom_pci_tbl,
.probe = esb2rom_init_one,
.remove = esb2rom_remove_one,
};
#endif
static int __init init_esb2rom(void)
{
struct pci_dev *pdev;
const struct pci_device_id *id;
int retVal;
pdev = NULL;
for (id = esb2rom_pci_tbl; id->vendor; id++) {
printk(KERN_DEBUG "device id = %x\n", id->device);
pdev = pci_get_device(id->vendor, id->device, NULL);
if (pdev) {
printk(KERN_DEBUG "matched device = %x\n", id->device);
break;
}
}
if (pdev) {
printk(KERN_DEBUG "matched device id %x\n", id->device);
retVal = esb2rom_init_one(pdev, &esb2rom_pci_tbl[0]);
pci_dev_put(pdev);
printk(KERN_DEBUG "retVal = %d\n", retVal);
return retVal;
}
return -ENXIO;
#if 0
return pci_register_driver(&esb2rom_driver);
#endif
}
static void __exit cleanup_esb2rom(void)
{
esb2rom_remove_one(esb2rom_window.pdev);
}
module_init(init_esb2rom);
module_exit(cleanup_esb2rom);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lew Glendenning <[email protected]>");
MODULE_DESCRIPTION("MTD map driver for BIOS chips on the ESB2 southbridge");
/* linux-master: drivers/mtd/maps/esb2rom.c */
// SPDX-License-Identifier: GPL-2.0+
/*
* Normal mappings of chips in physical memory
*
* Copyright (C) 2003 MontaVista Software Inc.
* Author: Jun Sun, [email protected] or [email protected]
*
* 031022 - [jsun] add run-time configure and partition setup
*
* Device tree support:
* Copyright (C) 2006 MontaVista Software Inc.
* Author: Vitaly Wool <[email protected]>
*
* Revised to handle newer style flash binding by:
* Copyright (C) 2007 David Gibson, IBM Corporation.
*
* GPIO address extension:
* Handle the case where a flash device is mostly addressed using physical
* address lines and supplemented by GPIOs. This way you can hook up, say, an 8MiB flash
* to a 2MiB memory range and use the GPIOs to select a particular range.
*
* Copyright © 2000 Nicolas Pitre <[email protected]>
* Copyright © 2005-2009 Analog Devices Inc.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/concat.h>
#include <linux/mtd/cfi_endian.h>
#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include "physmap-bt1-rom.h"
#include "physmap-gemini.h"
#include "physmap-ixp4xx.h"
#include "physmap-versatile.h"
struct physmap_flash_info {
unsigned int nmaps;
struct mtd_info **mtds;
struct mtd_info *cmtd;
struct map_info *maps;
spinlock_t vpp_lock;
int vpp_refcnt;
const char *probe_type;
const char * const *part_types;
unsigned int nparts;
const struct mtd_partition *parts;
struct gpio_descs *gpios;
unsigned int gpio_values;
unsigned int win_order;
};
static int physmap_flash_remove(struct platform_device *dev)
{
struct physmap_flash_info *info;
struct physmap_flash_data *physmap_data;
int i;
info = platform_get_drvdata(dev);
if (info->cmtd) {
WARN_ON(mtd_device_unregister(info->cmtd));
if (info->cmtd != info->mtds[0])
mtd_concat_destroy(info->cmtd);
}
for (i = 0; i < info->nmaps; i++) {
if (info->mtds[i])
map_destroy(info->mtds[i]);
}
physmap_data = dev_get_platdata(&dev->dev);
if (physmap_data && physmap_data->exit)
physmap_data->exit(dev);
pm_runtime_put(&dev->dev);
pm_runtime_disable(&dev->dev);
return 0;
}
static void physmap_set_vpp(struct map_info *map, int state)
{
struct platform_device *pdev;
struct physmap_flash_data *physmap_data;
struct physmap_flash_info *info;
unsigned long flags;
pdev = (struct platform_device *)map->map_priv_1;
physmap_data = dev_get_platdata(&pdev->dev);
if (!physmap_data->set_vpp)
return;
info = platform_get_drvdata(pdev);
spin_lock_irqsave(&info->vpp_lock, flags);
if (state) {
if (++info->vpp_refcnt == 1) /* first nested 'on' */
physmap_data->set_vpp(pdev, 1);
} else {
if (--info->vpp_refcnt == 0) /* last nested 'off' */
physmap_data->set_vpp(pdev, 0);
}
spin_unlock_irqrestore(&info->vpp_lock, flags);
}
#if IS_ENABLED(CONFIG_MTD_PHYSMAP_GPIO_ADDR)
static void physmap_set_addr_gpios(struct physmap_flash_info *info,
unsigned long ofs)
{
unsigned int i;
ofs >>= info->win_order;
if (info->gpio_values == ofs)
return;
for (i = 0; i < info->gpios->ndescs; i++) {
if ((BIT(i) & ofs) == (BIT(i) & info->gpio_values))
continue;
gpiod_set_value(info->gpios->desc[i], !!(BIT(i) & ofs));
}
info->gpio_values = ofs;
}
#define win_mask(order) (BIT(order) - 1)
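/*
 * Worked example (illustrative): a 2 MiB physical window (win_order == 21)
 * with two address GPIOs spans 8 MiB of flash. An access at ofs 0x300000
 * drives the GPIOs with 0x300000 >> 21 == 1 and touches window offset
 * 0x300000 & win_mask(21) == 0x100000.
 */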
static map_word physmap_addr_gpios_read(struct map_info *map,
unsigned long ofs)
{
struct platform_device *pdev;
struct physmap_flash_info *info;
map_word mw;
u16 word;
pdev = (struct platform_device *)map->map_priv_1;
info = platform_get_drvdata(pdev);
physmap_set_addr_gpios(info, ofs);
word = readw(map->virt + (ofs & win_mask(info->win_order)));
mw.x[0] = word;
return mw;
}
static void physmap_addr_gpios_copy_from(struct map_info *map, void *buf,
unsigned long ofs, ssize_t len)
{
struct platform_device *pdev;
struct physmap_flash_info *info;
pdev = (struct platform_device *)map->map_priv_1;
info = platform_get_drvdata(pdev);
while (len) {
unsigned int winofs = ofs & win_mask(info->win_order);
unsigned int chunklen = min_t(unsigned int, len,
BIT(info->win_order) - winofs);
physmap_set_addr_gpios(info, ofs);
memcpy_fromio(buf, map->virt + winofs, chunklen);
len -= chunklen;
buf += chunklen;
ofs += chunklen;
}
}
static void physmap_addr_gpios_write(struct map_info *map, map_word mw,
unsigned long ofs)
{
struct platform_device *pdev;
struct physmap_flash_info *info;
u16 word;
pdev = (struct platform_device *)map->map_priv_1;
info = platform_get_drvdata(pdev);
physmap_set_addr_gpios(info, ofs);
word = mw.x[0];
writew(word, map->virt + (ofs & win_mask(info->win_order)));
}
static void physmap_addr_gpios_copy_to(struct map_info *map, unsigned long ofs,
const void *buf, ssize_t len)
{
struct platform_device *pdev;
struct physmap_flash_info *info;
pdev = (struct platform_device *)map->map_priv_1;
info = platform_get_drvdata(pdev);
while (len) {
unsigned int winofs = ofs & win_mask(info->win_order);
unsigned int chunklen = min_t(unsigned int, len,
BIT(info->win_order) - winofs);
physmap_set_addr_gpios(info, ofs);
memcpy_toio(map->virt + winofs, buf, chunklen);
len -= chunklen;
buf += chunklen;
ofs += chunklen;
}
}
static int physmap_addr_gpios_map_init(struct map_info *map)
{
map->phys = NO_XIP;
map->read = physmap_addr_gpios_read;
map->copy_from = physmap_addr_gpios_copy_from;
map->write = physmap_addr_gpios_write;
map->copy_to = physmap_addr_gpios_copy_to;
return 0;
}
#else
static int physmap_addr_gpios_map_init(struct map_info *map)
{
return -ENOTSUPP;
}
#endif
#if IS_ENABLED(CONFIG_MTD_PHYSMAP_OF)
static const struct of_device_id of_flash_match[] = {
{
.compatible = "cfi-flash",
.data = "cfi_probe",
},
{
/*
* FIXME: JEDEC chips can't be safely and reliably
* probed, although the mtd code gets it right in
* practice most of the time. We should use the
* vendor and device ids specified by the binding to
* bypass the heuristic probe code, but the mtd layer
* provides, at present, no interface for doing so
* :(.
*/
.compatible = "jedec-flash",
.data = "jedec_probe",
},
{
.compatible = "mtd-ram",
.data = "map_ram",
},
{
.compatible = "mtd-rom",
.data = "map_rom",
},
{
.type = "rom",
.compatible = "direct-mapped"
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, of_flash_match);
static const char * const of_default_part_probes[] = {
"cmdlinepart", "RedBoot", "ofpart", "ofoldpart", NULL
};
static const char * const *of_get_part_probes(struct platform_device *dev)
{
struct device_node *dp = dev->dev.of_node;
const char **res;
int count;
count = of_property_count_strings(dp, "linux,part-probe");
if (count < 0)
return of_default_part_probes;
res = devm_kcalloc(&dev->dev, count + 1, sizeof(*res), GFP_KERNEL);
if (!res)
return NULL;
count = of_property_read_string_array(dp, "linux,part-probe", res,
count);
if (count < 0)
return NULL;
return res;
}
static const char *of_select_probe_type(struct platform_device *dev)
{
struct device_node *dp = dev->dev.of_node;
const struct of_device_id *match;
const char *probe_type;
match = of_match_device(of_flash_match, &dev->dev);
if (!match)
return NULL;
probe_type = match->data;
if (probe_type)
return probe_type;
dev_warn(&dev->dev,
"Device tree uses obsolete \"direct-mapped\" flash binding\n");
of_property_read_string(dp, "probe-type", &probe_type);
if (!probe_type)
return NULL;
if (!strcmp(probe_type, "CFI")) {
probe_type = "cfi_probe";
} else if (!strcmp(probe_type, "JEDEC")) {
probe_type = "jedec_probe";
} else if (!strcmp(probe_type, "ROM")) {
probe_type = "map_rom";
} else {
dev_warn(&dev->dev,
"obsolete_probe: don't know probe type '%s', mapping as rom\n",
probe_type);
probe_type = "map_rom";
}
return probe_type;
}
static int physmap_flash_of_init(struct platform_device *dev)
{
struct physmap_flash_info *info = platform_get_drvdata(dev);
struct device_node *dp = dev->dev.of_node;
const char *mtd_name = NULL;
int err, swap = 0;
bool map_indirect;
unsigned int i;
u32 bankwidth;
if (!dp)
return -EINVAL;
info->probe_type = of_select_probe_type(dev);
info->part_types = of_get_part_probes(dev);
if (!info->part_types)
return -ENOMEM;
of_property_read_string(dp, "linux,mtd-name", &mtd_name);
map_indirect = of_property_read_bool(dp, "no-unaligned-direct-access");
err = of_property_read_u32(dp, "bank-width", &bankwidth);
if (err) {
dev_err(&dev->dev, "Can't get bank width from device tree\n");
return err;
}
if (of_property_read_bool(dp, "big-endian"))
swap = CFI_BIG_ENDIAN;
else if (of_property_read_bool(dp, "little-endian"))
swap = CFI_LITTLE_ENDIAN;
for (i = 0; i < info->nmaps; i++) {
info->maps[i].name = mtd_name;
info->maps[i].swap = swap;
info->maps[i].bankwidth = bankwidth;
info->maps[i].device_node = dp;
err = of_flash_probe_bt1_rom(dev, dp, &info->maps[i]);
if (err)
return err;
err = of_flash_probe_gemini(dev, dp, &info->maps[i]);
if (err)
return err;
err = of_flash_probe_ixp4xx(dev, dp, &info->maps[i]);
if (err)
return err;
err = of_flash_probe_versatile(dev, dp, &info->maps[i]);
if (err)
return err;
/*
* On some platforms (e.g. MPC5200) a direct 1:1 mapping
* may cause problems with JFFS2 usage, as the local bus (LPB)
* doesn't support unaligned accesses as implemented in the
* JFFS2 code via memcpy(). By setting NO_XIP, the
* flash will not be exposed directly to the MTD users
* (e.g. JFFS2) any more.
*/
if (map_indirect)
info->maps[i].phys = NO_XIP;
}
return 0;
}
#else /* IS_ENABLED(CONFIG_MTD_PHYSMAP_OF) */
#define of_flash_match NULL
static int physmap_flash_of_init(struct platform_device *dev)
{
return -ENOTSUPP;
}
#endif /* IS_ENABLED(CONFIG_MTD_PHYSMAP_OF) */
static const char * const rom_probe_types[] = {
"cfi_probe", "jedec_probe", "qinfo_probe", "map_rom",
};
static const char * const part_probe_types[] = {
"cmdlinepart", "RedBoot", "afs", NULL
};
static int physmap_flash_pdata_init(struct platform_device *dev)
{
struct physmap_flash_info *info = platform_get_drvdata(dev);
struct physmap_flash_data *physmap_data;
unsigned int i;
int err;
physmap_data = dev_get_platdata(&dev->dev);
if (!physmap_data)
return -EINVAL;
info->probe_type = physmap_data->probe_type;
info->part_types = physmap_data->part_probe_types ? : part_probe_types;
info->parts = physmap_data->parts;
info->nparts = physmap_data->nr_parts;
if (physmap_data->init) {
err = physmap_data->init(dev);
if (err)
return err;
}
for (i = 0; i < info->nmaps; i++) {
info->maps[i].bankwidth = physmap_data->width;
info->maps[i].pfow_base = physmap_data->pfow_base;
info->maps[i].set_vpp = physmap_set_vpp;
}
return 0;
}
static int physmap_flash_probe(struct platform_device *dev)
{
struct physmap_flash_info *info;
int err = 0;
int i;
if (!dev->dev.of_node && !dev_get_platdata(&dev->dev))
return -EINVAL;
info = devm_kzalloc(&dev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
while (platform_get_resource(dev, IORESOURCE_MEM, info->nmaps))
info->nmaps++;
if (!info->nmaps)
return -ENODEV;
info->maps = devm_kzalloc(&dev->dev,
sizeof(*info->maps) * info->nmaps,
GFP_KERNEL);
if (!info->maps)
return -ENOMEM;
info->mtds = devm_kzalloc(&dev->dev,
sizeof(*info->mtds) * info->nmaps,
GFP_KERNEL);
if (!info->mtds)
return -ENOMEM;
platform_set_drvdata(dev, info);
info->gpios = devm_gpiod_get_array_optional(&dev->dev, "addr",
GPIOD_OUT_LOW);
if (IS_ERR(info->gpios))
return PTR_ERR(info->gpios);
if (info->gpios && info->nmaps > 1) {
dev_err(&dev->dev, "addr-gpios only supported for nmaps == 1\n");
return -EINVAL;
}
pm_runtime_enable(&dev->dev);
pm_runtime_get_sync(&dev->dev);
if (dev->dev.of_node)
err = physmap_flash_of_init(dev);
else
err = physmap_flash_pdata_init(dev);
if (err) {
pm_runtime_put(&dev->dev);
pm_runtime_disable(&dev->dev);
return err;
}
for (i = 0; i < info->nmaps; i++) {
struct resource *res;
info->maps[i].virt = devm_platform_get_and_ioremap_resource(dev, i, &res);
if (IS_ERR(info->maps[i].virt)) {
err = PTR_ERR(info->maps[i].virt);
goto err_out;
}
dev_notice(&dev->dev, "physmap platform flash device: %pR\n",
res);
if (!info->maps[i].name)
info->maps[i].name = dev_name(&dev->dev);
if (!info->maps[i].phys)
info->maps[i].phys = res->start;
info->win_order = get_bitmask_order(resource_size(res)) - 1;
info->maps[i].size = BIT(info->win_order +
(info->gpios ?
info->gpios->ndescs : 0));
info->maps[i].map_priv_1 = (unsigned long)dev;
if (info->gpios) {
err = physmap_addr_gpios_map_init(&info->maps[i]);
if (err)
goto err_out;
}
#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
/*
* Only use the simple_map implementation if map hooks are not
	 * implemented. Since map->read() is mandatory, checking for its
* presence is enough.
*/
if (!info->maps[i].read)
simple_map_init(&info->maps[i]);
#else
simple_map_init(&info->maps[i]);
#endif
if (info->probe_type) {
info->mtds[i] = do_map_probe(info->probe_type,
&info->maps[i]);
} else {
int j;
for (j = 0; j < ARRAY_SIZE(rom_probe_types); j++) {
info->mtds[i] = do_map_probe(rom_probe_types[j],
&info->maps[i]);
if (info->mtds[i])
break;
}
}
if (!info->mtds[i]) {
dev_err(&dev->dev, "map_probe failed\n");
err = -ENXIO;
goto err_out;
}
info->mtds[i]->dev.parent = &dev->dev;
}
if (info->nmaps == 1) {
info->cmtd = info->mtds[0];
} else {
/*
* We detected multiple devices. Concatenate them together.
*/
info->cmtd = mtd_concat_create(info->mtds, info->nmaps,
dev_name(&dev->dev));
if (!info->cmtd)
err = -ENXIO;
}
if (err)
goto err_out;
spin_lock_init(&info->vpp_lock);
mtd_set_of_node(info->cmtd, dev->dev.of_node);
err = mtd_device_parse_register(info->cmtd, info->part_types, NULL,
info->parts, info->nparts);
if (err)
goto err_out;
return 0;
err_out:
physmap_flash_remove(dev);
return err;
}
#ifdef CONFIG_PM
static void physmap_flash_shutdown(struct platform_device *dev)
{
struct physmap_flash_info *info = platform_get_drvdata(dev);
int i;
for (i = 0; i < info->nmaps && info->mtds[i]; i++)
if (mtd_suspend(info->mtds[i]) == 0)
mtd_resume(info->mtds[i]);
}
#else
#define physmap_flash_shutdown NULL
#endif
static struct platform_driver physmap_flash_driver = {
.probe = physmap_flash_probe,
.remove = physmap_flash_remove,
.shutdown = physmap_flash_shutdown,
.driver = {
.name = "physmap-flash",
.of_match_table = of_flash_match,
},
};
#ifdef CONFIG_MTD_PHYSMAP_COMPAT
static struct physmap_flash_data physmap_flash_data = {
.width = CONFIG_MTD_PHYSMAP_BANKWIDTH,
};
static struct resource physmap_flash_resource = {
.start = CONFIG_MTD_PHYSMAP_START,
.end = CONFIG_MTD_PHYSMAP_START + CONFIG_MTD_PHYSMAP_LEN - 1,
.flags = IORESOURCE_MEM,
};
static struct platform_device physmap_flash = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &physmap_flash_data,
},
.num_resources = 1,
.resource = &physmap_flash_resource,
};
#endif
static int __init physmap_init(void)
{
int err;
err = platform_driver_register(&physmap_flash_driver);
#ifdef CONFIG_MTD_PHYSMAP_COMPAT
if (err == 0) {
err = platform_device_register(&physmap_flash);
if (err)
platform_driver_unregister(&physmap_flash_driver);
}
#endif
return err;
}
static void __exit physmap_exit(void)
{
#ifdef CONFIG_MTD_PHYSMAP_COMPAT
platform_device_unregister(&physmap_flash);
#endif
platform_driver_unregister(&physmap_flash_driver);
}
module_init(physmap_init);
module_exit(physmap_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <[email protected]>");
MODULE_AUTHOR("Vitaly Wool <[email protected]>");
MODULE_AUTHOR("Mike Frysinger <[email protected]>");
MODULE_DESCRIPTION("Generic configurable MTD map driver");
/* legacy platform drivers can't hotplug or coldplug */
#ifndef CONFIG_MTD_PHYSMAP_COMPAT
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:physmap-flash");
#endif
| linux-master | drivers/mtd/maps/physmap-core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
*
* Authors:
* Serge Semin <[email protected]>
*
* Baikal-T1 Physically Mapped Internal ROM driver
*/
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/map.h>
#include <linux/mtd/xip.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/types.h>
#include "physmap-bt1-rom.h"
/*
 * Baikal-T1 SoC ROMs are only accessible via dword-aligned instructions.
 * We have to take this into account when implementing the data read
 * methods. Note there is no need to bother with endianness, since both
 * the Baikal-T1 CPU and its MMIO are little-endian.
*/
static map_word __xipram bt1_rom_map_read(struct map_info *map,
unsigned long ofs)
{
void __iomem *src = map->virt + ofs;
unsigned int shift;
map_word ret;
u32 data;
/* Read data within offset dword. */
shift = (uintptr_t)src & 0x3;
data = readl_relaxed(src - shift);
if (!shift) {
ret.x[0] = data;
return ret;
}
ret.x[0] = data >> (shift * BITS_PER_BYTE);
/* Read data from the next dword. */
shift = 4 - shift;
if (ofs + shift >= map->size)
return ret;
data = readl_relaxed(src + shift);
ret.x[0] |= data << (shift * BITS_PER_BYTE);
return ret;
}
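/*
 * Worked example (assuming map->virt is dword-aligned): reading at
 * ofs = 5 gives shift = 1, so bytes 5..7 come from the dword at
 * offset 4 (data >> 8) and byte 8 from the dword at offset 8
 * (data << 24) - a full 32-bit map_word assembled from two aligned
 * reads, with no sub-dword access to the ROM.
 */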
static void __xipram bt1_rom_map_copy_from(struct map_info *map,
void *to, unsigned long from,
ssize_t len)
{
void __iomem *src = map->virt + from;
unsigned int shift, chunk;
u32 data;
if (len <= 0 || from >= map->size)
return;
/* Make sure we don't go over the map limit. */
len = min_t(ssize_t, map->size - from, len);
	/*
	 * Since the requested data size can be pretty big, we have to make
	 * the copy procedure as efficient as possible. That's why it's
	 * split into three stages: an unaligned head, an aligned body and
	 * an unaligned tail.
	 */
shift = (uintptr_t)src & 0x3;
if (shift) {
chunk = min_t(ssize_t, 4 - shift, len);
data = readl_relaxed(src - shift);
memcpy(to, (char *)&data + shift, chunk);
src += chunk;
to += chunk;
len -= chunk;
}
while (len >= 4) {
data = readl_relaxed(src);
memcpy(to, &data, 4);
src += 4;
to += 4;
len -= 4;
}
if (len) {
data = readl_relaxed(src);
memcpy(to, &data, len);
}
}
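/*
 * Worked example (assuming map->virt is dword-aligned): a copy with
 * from = 2, len = 9 is performed as a 2-byte head taken from the dword
 * at offset 0, one aligned 4-byte body read at offset 4, and a 3-byte
 * tail taken from the dword at offset 8 - nine bytes total, all
 * fetched with aligned 32-bit reads.
 */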
int of_flash_probe_bt1_rom(struct platform_device *pdev,
struct device_node *np,
struct map_info *map)
{
struct device *dev = &pdev->dev;
/* It's supposed to be read-only MTD. */
if (!of_device_is_compatible(np, "mtd-rom")) {
dev_info(dev, "No mtd-rom compatible string\n");
return 0;
}
/* Multiplatform guard. */
if (!of_device_is_compatible(np, "baikal,bt1-int-rom"))
return 0;
/* Sanity check the device parameters retrieved from DTB. */
if (map->bankwidth != 4)
dev_warn(dev, "Bank width is supposed to be 32 bits wide\n");
map->read = bt1_rom_map_read;
map->copy_from = bt1_rom_map_copy_from;
return 0;
}
| linux-master | drivers/mtd/maps/physmap-bt1-rom.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* amd76xrom.c
*
* Normal mappings of chips in physical memory
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/flashchip.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/list.h>
#define xstr(s) str(s)
#define str(s) #s
#define MOD_NAME xstr(KBUILD_BASENAME)
#define ADDRESS_NAME_LEN 18
#define ROM_PROBE_STEP_SIZE (64*1024) /* 64KiB */
struct amd76xrom_window {
void __iomem *virt;
unsigned long phys;
unsigned long size;
struct list_head maps;
struct resource rsrc;
struct pci_dev *pdev;
};
struct amd76xrom_map_info {
struct list_head list;
struct map_info map;
struct mtd_info *mtd;
struct resource rsrc;
char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
};
/* The 2 bits controlling the window size are often set to allow reading
* the BIOS, but too small to allow writing, since the lock registers are
* 4MiB lower in the address space than the data.
*
* This is intended to prevent flashing the bios, perhaps accidentally.
*
 * This parameter allows the driver to override the BIOS settings.
 *
 * The bits are 6 and 7.  If both bits are set, it is a 5MiB window.
 * If only bit 7 is set, it is a 4MiB window.  Otherwise, it is a
 * 64KiB window.
*
*/
static uint win_size_bits;
module_param(win_size_bits, uint, 0);
MODULE_PARM_DESC(win_size_bits, "ROM window size bits override for 0x43 byte, normally set by BIOS.");
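/*
 * Example usage (illustrative): "modprobe amd76xrom win_size_bits=0xc0"
 * ORs bits 6 and 7 into config byte 0x43, selecting the full 5MiB window.
 */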
static struct amd76xrom_window amd76xrom_window = {
.maps = LIST_HEAD_INIT(amd76xrom_window.maps),
};
static void amd76xrom_cleanup(struct amd76xrom_window *window)
{
struct amd76xrom_map_info *map, *scratch;
u8 byte;
if (window->pdev) {
/* Disable writes through the rom window */
pci_read_config_byte(window->pdev, 0x40, &byte);
pci_write_config_byte(window->pdev, 0x40, byte & ~1);
pci_dev_put(window->pdev);
}
/* Free all of the mtd devices */
list_for_each_entry_safe(map, scratch, &window->maps, list) {
if (map->rsrc.parent) {
release_resource(&map->rsrc);
}
mtd_device_unregister(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
}
if (window->rsrc.parent)
release_resource(&window->rsrc);
if (window->virt) {
iounmap(window->virt);
window->virt = NULL;
window->phys = 0;
window->size = 0;
window->pdev = NULL;
}
}
static int amd76xrom_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
u8 byte;
struct amd76xrom_window *window = &amd76xrom_window;
struct amd76xrom_map_info *map = NULL;
unsigned long map_top;
/* Remember the pci dev I find the window in - already have a ref */
window->pdev = pdev;
/* Enable the selected rom window. This is often incorrectly
* set up by the BIOS, and the 4MiB offset for the lock registers
* requires the full 5MiB of window space.
*
	 * This 'write, then read' approach preserves any bits in the
	 * register that the BIOS may have set for other hardware uses.
*/
pci_read_config_byte(pdev, 0x43, &byte);
	pci_write_config_byte(pdev, 0x43, byte | win_size_bits);
	/* Assume the rom window is properly set up, and find its size */
pci_read_config_byte(pdev, 0x43, &byte);
if ((byte & ((1<<7)|(1<<6))) == ((1<<7)|(1<<6))) {
window->phys = 0xffb00000; /* 5MiB */
}
else if ((byte & (1<<7)) == (1<<7)) {
window->phys = 0xffc00000; /* 4MiB */
}
else {
window->phys = 0xffff0000; /* 64KiB */
}
window->size = 0xffffffffUL - window->phys + 1UL;
/*
* Try to reserve the window mem region. If this fails then
* it is likely due to a fragment of the window being
* "reserved" by the BIOS. In the case that the
* request_mem_region() fails then once the rom size is
* discovered we will try to reserve the unreserved fragment.
*/
window->rsrc.name = MOD_NAME;
window->rsrc.start = window->phys;
window->rsrc.end = window->phys + window->size - 1;
window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, &window->rsrc)) {
window->rsrc.parent = NULL;
printk(KERN_ERR MOD_NAME
" %s(): Unable to register resource %pR - kernel bug?\n",
__func__, &window->rsrc);
return -EBUSY;
}
/* Enable writes through the rom window */
pci_read_config_byte(pdev, 0x40, &byte);
pci_write_config_byte(pdev, 0x40, byte | 1);
/* FIXME handle registers 0x80 - 0x8C the bios region locks */
/* For write accesses caches are useless */
window->virt = ioremap(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
goto out;
}
/* Get the first address to look for an rom chip at */
map_top = window->phys;
#if 1
	/* Running the probe sequence over the firmware hub lock
	 * registers sets them to 0x7 (no access).
* Probe at most the last 4M of the address space.
*/
if (map_top < 0xffc00000) {
map_top = 0xffc00000;
}
#endif
/* Loop through and look for rom chips */
while((map_top - 1) < 0xffffffffUL) {
struct cfi_private *cfi;
unsigned long offset;
int i;
if (!map) {
map = kmalloc(sizeof(*map), GFP_KERNEL);
if (!map)
goto out;
}
memset(map, 0, sizeof(*map));
INIT_LIST_HEAD(&map->list);
map->map.name = map->map_name;
map->map.phys = map_top;
offset = map_top - window->phys;
map->map.virt = (void __iomem *)
(((unsigned long)(window->virt)) + offset);
map->map.size = 0xffffffffUL - map_top + 1UL;
/* Set the name of the map to the address I am trying */
sprintf(map->map_name, "%s @%08Lx",
MOD_NAME, (unsigned long long)map->map.phys);
/* There is no generic VPP support */
for(map->map.bankwidth = 32; map->map.bankwidth;
map->map.bankwidth >>= 1)
{
char **probe_type;
/* Skip bankwidths that are not supported */
if (!map_bankwidth_supported(map->map.bankwidth))
continue;
/* Setup the map methods */
simple_map_init(&map->map);
/* Try all of the probe methods */
probe_type = rom_probe_types;
for(; *probe_type; probe_type++) {
map->mtd = do_map_probe(*probe_type, &map->map);
if (map->mtd)
goto found;
}
}
map_top += ROM_PROBE_STEP_SIZE;
continue;
found:
/* Trim the size if we are larger than the map */
if (map->mtd->size > map->map.size) {
printk(KERN_WARNING MOD_NAME
" rom(%llu) larger than window(%lu). fixing...\n",
(unsigned long long)map->mtd->size, map->map.size);
map->mtd->size = map->map.size;
}
if (window->rsrc.parent) {
/*
* Registering the MTD device in iomem may not be possible
* if there is a BIOS "reserved" and BUSY range. If this
* fails then continue anyway.
*/
map->rsrc.name = map->map_name;
map->rsrc.start = map->map.phys;
map->rsrc.end = map->map.phys + map->mtd->size - 1;
map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&window->rsrc, &map->rsrc)) {
printk(KERN_ERR MOD_NAME
": cannot reserve MTD resource\n");
map->rsrc.parent = NULL;
}
}
/* Make the whole region visible in the map */
map->map.virt = window->virt;
map->map.phys = window->phys;
cfi = map->map.fldrv_priv;
for(i = 0; i < cfi->numchips; i++) {
cfi->chips[i].start += offset;
}
	/* Now that the mtd device is complete, claim and export it */
map->mtd->owner = THIS_MODULE;
if (mtd_device_register(map->mtd, NULL, 0)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
}
/* Calculate the new value of map_top */
map_top += map->mtd->size;
/* File away the map structure */
list_add(&map->list, &window->maps);
map = NULL;
}
out:
/* Free any left over map structures */
kfree(map);
/* See if I have any map structures */
if (list_empty(&window->maps)) {
amd76xrom_cleanup(window);
return -ENODEV;
}
return 0;
}
static void amd76xrom_remove_one(struct pci_dev *pdev)
{
struct amd76xrom_window *window = &amd76xrom_window;
amd76xrom_cleanup(window);
}
static const struct pci_device_id amd76xrom_pci_tbl[] = {
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7440,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_AMD, 0x7468 }, /* amd8111 support */
{ 0, }
};
MODULE_DEVICE_TABLE(pci, amd76xrom_pci_tbl);
#if 0
static struct pci_driver amd76xrom_driver = {
.name = MOD_NAME,
.id_table = amd76xrom_pci_tbl,
.probe = amd76xrom_init_one,
.remove = amd76xrom_remove_one,
};
#endif
static int __init init_amd76xrom(void)
{
struct pci_dev *pdev;
const struct pci_device_id *id;
pdev = NULL;
for(id = amd76xrom_pci_tbl; id->vendor; id++) {
pdev = pci_get_device(id->vendor, id->device, NULL);
if (pdev) {
break;
}
}
if (pdev) {
return amd76xrom_init_one(pdev, &amd76xrom_pci_tbl[0]);
}
return -ENXIO;
#if 0
return pci_register_driver(&amd76xrom_driver);
#endif
}
static void __exit cleanup_amd76xrom(void)
{
amd76xrom_remove_one(amd76xrom_window.pdev);
}
module_init(init_amd76xrom);
module_exit(cleanup_amd76xrom);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biederman <[email protected]>");
MODULE_DESCRIPTION("MTD map driver for BIOS chips on the AMD76X southbridge");
| linux-master | drivers/mtd/maps/amd76xrom.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 Rafał Miłecki <[email protected]>
*/
#include <linux/bcm47xx_nvram.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include "ofpart_linksys_ns.h"
#define NVRAM_BOOT_PART "bootpartition"
static int ofpart_linksys_ns_bootpartition(void)
{
char buf[4];
int bootpartition;
/* Check CFE environment variable */
if (bcm47xx_nvram_getenv(NVRAM_BOOT_PART, buf, sizeof(buf)) > 0) {
if (!kstrtoint(buf, 0, &bootpartition))
return bootpartition;
pr_warn("Failed to parse %s value \"%s\"\n", NVRAM_BOOT_PART,
buf);
} else {
pr_warn("Failed to get NVRAM \"%s\"\n", NVRAM_BOOT_PART);
}
return 0;
}
int linksys_ns_partitions_post_parse(struct mtd_info *mtd,
struct mtd_partition *parts,
int nr_parts)
{
int bootpartition = ofpart_linksys_ns_bootpartition();
int trx_idx = 0;
int i;
for (i = 0; i < nr_parts; i++) {
if (of_device_is_compatible(parts[i].of_node, "linksys,ns-firmware")) {
if (trx_idx++ == bootpartition)
parts[i].name = "firmware";
else
parts[i].name = "backup";
}
}
return 0;
}
| linux-master | drivers/mtd/parsers/ofpart_linksys_ns.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Flash partitions described by the OF (or flattened) device tree
*
* Copyright © 2006 MontaVista Software Inc.
* Author: Vitaly Wool <[email protected]>
*
* Revised to handle newer style flash binding by:
* Copyright © 2007 David Gibson, IBM Corporation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/mtd/partitions.h>
#include "ofpart_bcm4908.h"
#include "ofpart_linksys_ns.h"
struct fixed_partitions_quirks {
int (*post_parse)(struct mtd_info *mtd, struct mtd_partition *parts, int nr_parts);
};
static struct fixed_partitions_quirks bcm4908_partitions_quirks = {
.post_parse = bcm4908_partitions_post_parse,
};
static struct fixed_partitions_quirks linksys_ns_partitions_quirks = {
.post_parse = linksys_ns_partitions_post_parse,
};
static const struct of_device_id parse_ofpart_match_table[];
static bool node_has_compatible(struct device_node *pp)
{
return of_get_property(pp, "compatible", NULL);
}
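/*
 * Illustrative device tree fragment (not taken from this file) of the
 * "fixed-partitions" binding handled below:
 *
 *	flash@0 {
 *		partitions {
 *			compatible = "fixed-partitions";
 *			#address-cells = <1>;
 *			#size-cells = <1>;
 *
 *			partition@0 {
 *				label = "u-boot";
 *				reg = <0x0 0x100000>;
 *				read-only;
 *			};
 *
 *			partition@100000 {
 *				label = "firmware";
 *				reg = <0x100000 0xf00000>;
 *			};
 *		};
 *	};
 */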
static int parse_fixed_partitions(struct mtd_info *master,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
const struct fixed_partitions_quirks *quirks;
const struct of_device_id *of_id;
struct mtd_partition *parts;
struct device_node *mtd_node;
struct device_node *ofpart_node;
const char *partname;
struct device_node *pp;
int nr_parts, i, ret = 0;
bool dedicated = true;
/* Pull of_node from the master device node */
mtd_node = mtd_get_of_node(master);
if (!mtd_node)
return 0;
if (!master->parent) { /* Master */
ofpart_node = of_get_child_by_name(mtd_node, "partitions");
if (!ofpart_node) {
/*
* We might get here even when ofpart isn't used at all (e.g.,
* when using another parser), so don't be louder than
* KERN_DEBUG
*/
pr_debug("%s: 'partitions' subnode not found on %pOF. Trying to parse direct subnodes as partitions.\n",
master->name, mtd_node);
ofpart_node = mtd_node;
dedicated = false;
}
} else { /* Partition */
ofpart_node = mtd_node;
}
of_id = of_match_node(parse_ofpart_match_table, ofpart_node);
if (dedicated && !of_id) {
/* The 'partitions' subnode might be used by another parser */
return 0;
}
quirks = of_id ? of_id->data : NULL;
/* First count the subnodes */
nr_parts = 0;
for_each_child_of_node(ofpart_node, pp) {
if (!dedicated && node_has_compatible(pp))
continue;
nr_parts++;
}
if (nr_parts == 0)
return 0;
parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
if (!parts)
return -ENOMEM;
i = 0;
for_each_child_of_node(ofpart_node, pp) {
const __be32 *reg;
int len;
int a_cells, s_cells;
if (!dedicated && node_has_compatible(pp))
continue;
reg = of_get_property(pp, "reg", &len);
if (!reg) {
if (dedicated) {
pr_debug("%s: ofpart partition %pOF (%pOF) missing reg property.\n",
master->name, pp,
mtd_node);
goto ofpart_fail;
} else {
nr_parts--;
continue;
}
}
a_cells = of_n_addr_cells(pp);
s_cells = of_n_size_cells(pp);
if (!dedicated && s_cells == 0) {
/*
			 * This is an ugly workaround to avoid creating a
			 * regression on devices that still create partitions
			 * as direct children of the nand controller node.
			 * This can happen when the nand controller node has
			 * #size-cells equal to 0 and the firmware (e.g.
			 * U-Boot) just adds the partitions there assuming
			 * 32-bit addressing.
			 *
			 * If you get this warning, your firmware and/or DTS
			 * should really be fixed.
			 *
			 * This only works for devices smaller than 4GiB.
*/
pr_warn("%s: ofpart partition %pOF (%pOF) #size-cells is wrongly set to <0>, assuming <1> for parsing partitions.\n",
master->name, pp, mtd_node);
s_cells = 1;
}
if (len / 4 != a_cells + s_cells) {
pr_debug("%s: ofpart partition %pOF (%pOF) error parsing reg property.\n",
master->name, pp,
mtd_node);
goto ofpart_fail;
}
parts[i].offset = of_read_number(reg, a_cells);
parts[i].size = of_read_number(reg + a_cells, s_cells);
parts[i].of_node = pp;
partname = of_get_property(pp, "label", &len);
if (!partname)
partname = of_get_property(pp, "name", &len);
parts[i].name = partname;
if (of_get_property(pp, "read-only", &len))
parts[i].mask_flags |= MTD_WRITEABLE;
if (of_get_property(pp, "lock", &len))
parts[i].mask_flags |= MTD_POWERUP_LOCK;
if (of_property_read_bool(pp, "slc-mode"))
parts[i].add_flags |= MTD_SLC_ON_MLC_EMULATION;
i++;
}
if (!nr_parts)
goto ofpart_none;
if (quirks && quirks->post_parse)
quirks->post_parse(master, parts, nr_parts);
*pparts = parts;
return nr_parts;
ofpart_fail:
pr_err("%s: error parsing ofpart partition %pOF (%pOF)\n",
master->name, pp, mtd_node);
ret = -EINVAL;
ofpart_none:
of_node_put(pp);
kfree(parts);
return ret;
}
static const struct of_device_id parse_ofpart_match_table[] = {
/* Generic */
{ .compatible = "fixed-partitions" },
/* Customized */
{ .compatible = "brcm,bcm4908-partitions", .data = &bcm4908_partitions_quirks, },
{ .compatible = "linksys,ns-partitions", .data = &linksys_ns_partitions_quirks, },
{},
};
MODULE_DEVICE_TABLE(of, parse_ofpart_match_table);
static struct mtd_part_parser ofpart_parser = {
.parse_fn = parse_fixed_partitions,
.name = "fixed-partitions",
.of_match_table = parse_ofpart_match_table,
};
static int parse_ofoldpart_partitions(struct mtd_info *master,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
struct mtd_partition *parts;
struct device_node *dp;
int i, plen, nr_parts;
const struct {
__be32 offset, len;
} *part;
const char *names;
/* Pull of_node from the master device node */
dp = mtd_get_of_node(master);
if (!dp)
return 0;
part = of_get_property(dp, "partitions", &plen);
if (!part)
return 0; /* No partitions found */
pr_warn("Device tree uses obsolete partition map binding: %pOF\n", dp);
nr_parts = plen / sizeof(part[0]);
parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
if (!parts)
return -ENOMEM;
names = of_get_property(dp, "partition-names", &plen);
for (i = 0; i < nr_parts; i++) {
parts[i].offset = be32_to_cpu(part->offset);
parts[i].size = be32_to_cpu(part->len) & ~1;
/* bit 0 set signifies read only partition */
if (be32_to_cpu(part->len) & 1)
parts[i].mask_flags = MTD_WRITEABLE;
if (names && (plen > 0)) {
int len = strlen(names) + 1;
parts[i].name = names;
plen -= len;
names += len;
} else {
parts[i].name = "unnamed";
}
part++;
}
*pparts = parts;
return nr_parts;
}
static struct mtd_part_parser ofoldpart_parser = {
.parse_fn = parse_ofoldpart_partitions,
.name = "ofoldpart",
};
static int __init ofpart_parser_init(void)
{
register_mtd_parser(&ofpart_parser);
register_mtd_parser(&ofoldpart_parser);
return 0;
}
static void __exit ofpart_parser_exit(void)
{
deregister_mtd_parser(&ofpart_parser);
deregister_mtd_parser(&ofoldpart_parser);
}
module_init(ofpart_parser_init);
module_exit(ofpart_parser_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree");
MODULE_AUTHOR("Vitaly Wool, David Gibson");
/*
* When MTD core cannot find the requested parser, it tries to load the module
 * with the same name. Since we provide the fixed-partitions and ofoldpart
 * parsers, we should have the corresponding aliases.
*/
MODULE_ALIAS("fixed-partitions");
MODULE_ALIAS("ofoldpart");
| linux-master | drivers/mtd/parsers/ofpart_core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 Rafał Miłecki <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/mtd/partitions.h>
#include "ofpart_bcm4908.h"
#define BLPARAMS_FW_OFFSET "NAND_RFS_OFS"
static long long bcm4908_partitions_fw_offset(void)
{
struct device_node *root;
struct property *prop;
const char *s;
root = of_find_node_by_path("/");
if (!root)
return -ENOENT;
of_property_for_each_string(root, "brcm_blparms", prop, s) {
size_t len = strlen(BLPARAMS_FW_OFFSET);
unsigned long offset;
int err;
if (strncmp(s, BLPARAMS_FW_OFFSET, len) || s[len] != '=')
continue;
err = kstrtoul(s + len + 1, 0, &offset);
if (err) {
pr_err("failed to parse %s\n", s + len + 1);
of_node_put(root);
return err;
}
of_node_put(root);
return offset << 10;
}
of_node_put(root);
return -ENOENT;
}
int bcm4908_partitions_post_parse(struct mtd_info *mtd, struct mtd_partition *parts, int nr_parts)
{
long long fw_offset;
int i;
fw_offset = bcm4908_partitions_fw_offset();
for (i = 0; i < nr_parts; i++) {
if (of_device_is_compatible(parts[i].of_node, "brcm,bcm4908-firmware")) {
if (fw_offset < 0 || parts[i].offset == fw_offset)
parts[i].name = "firmware";
else
parts[i].name = "backup";
}
}
return 0;
}
| linux-master | drivers/mtd/parsers/ofpart_bcm4908.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* drivers/mtd/scpart.c: Sercomm Partition Parser
*
* Copyright (C) 2018 NOGUCHI Hiroshi
* Copyright (C) 2022 Mikhail Zhilkin
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/module.h>
#define MOD_NAME "scpart"
#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt) MOD_NAME ": " fmt
#define ID_ALREADY_FOUND 0xffffffffUL
#define MAP_OFFS_IN_BLK 0x800
#define MAP_MIRROR_NUM 2
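/*
 * On-flash layout (as implied by the code below): the partition map
 * lives in an erase block that begins with the "SCFLMAPOK" magic; the
 * descriptor array starts at offset 0x800 within that block, and the
 * map is mirrored MAP_MIRROR_NUM times. The first copy that yields
 * valid descriptors is used.
 */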
static const char sc_part_magic[] = {
'S', 'C', 'F', 'L', 'M', 'A', 'P', 'O', 'K', '\0',
};
#define PART_MAGIC_LEN sizeof(sc_part_magic)
/* assumes that all fields are stored in CPU-native endianness */
struct sc_part_desc {
uint32_t part_id;
uint32_t part_offs;
uint32_t part_bytes;
};
static uint32_t scpart_desc_is_valid(struct sc_part_desc *pdesc)
{
return ((pdesc->part_id != 0xffffffffUL) &&
(pdesc->part_offs != 0xffffffffUL) &&
(pdesc->part_bytes != 0xffffffffUL));
}
static int scpart_scan_partmap(struct mtd_info *master, loff_t partmap_offs,
struct sc_part_desc **ppdesc)
{
int cnt = 0;
int res = 0;
int res2;
uint32_t offs;
size_t retlen;
struct sc_part_desc *pdesc = NULL;
struct sc_part_desc *tmpdesc;
uint8_t *buf;
buf = kzalloc(master->erasesize, GFP_KERNEL);
if (!buf) {
res = -ENOMEM;
goto out;
}
res2 = mtd_read(master, partmap_offs, master->erasesize, &retlen, buf);
if (res2 || retlen != master->erasesize) {
res = -EIO;
goto free;
}
for (offs = MAP_OFFS_IN_BLK;
offs < master->erasesize - sizeof(*tmpdesc);
offs += sizeof(*tmpdesc)) {
tmpdesc = (struct sc_part_desc *)&buf[offs];
if (!scpart_desc_is_valid(tmpdesc))
break;
cnt++;
}
if (cnt > 0) {
int bytes = cnt * sizeof(*pdesc);
pdesc = kcalloc(cnt, sizeof(*pdesc), GFP_KERNEL);
if (!pdesc) {
res = -ENOMEM;
goto free;
}
memcpy(pdesc, &(buf[MAP_OFFS_IN_BLK]), bytes);
*ppdesc = pdesc;
res = cnt;
}
free:
kfree(buf);
out:
return res;
}
static int scpart_find_partmap(struct mtd_info *master,
struct sc_part_desc **ppdesc)
{
int magic_found = 0;
int res = 0;
int res2;
loff_t offs = 0;
size_t retlen;
uint8_t rdbuf[PART_MAGIC_LEN];
while ((magic_found < MAP_MIRROR_NUM) &&
(offs < master->size) &&
!mtd_block_isbad(master, offs)) {
res2 = mtd_read(master, offs, PART_MAGIC_LEN, &retlen, rdbuf);
if (res2 || retlen != PART_MAGIC_LEN) {
res = -EIO;
goto out;
}
if (!memcmp(rdbuf, sc_part_magic, PART_MAGIC_LEN)) {
pr_debug("Signature found at 0x%llx\n", offs);
magic_found++;
res = scpart_scan_partmap(master, offs, ppdesc);
if (res > 0)
goto out;
}
offs += master->erasesize;
}
out:
if (res > 0)
pr_info("Valid 'SC PART MAP' (%d partitions) found at 0x%llx\n", res, offs);
else
pr_info("No valid 'SC PART MAP' was found\n");
return res;
}
static int scpart_parse(struct mtd_info *master,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
const char *partname;
int n;
int nr_scparts;
int nr_parts = 0;
int res = 0;
struct sc_part_desc *scpart_map = NULL;
struct mtd_partition *parts = NULL;
struct device_node *mtd_node;
struct device_node *ofpart_node;
struct device_node *pp;
mtd_node = mtd_get_of_node(master);
if (!mtd_node) {
res = -ENOENT;
goto out;
}
ofpart_node = of_get_child_by_name(mtd_node, "partitions");
if (!ofpart_node) {
pr_info("%s: 'partitions' subnode not found on %pOF.\n",
master->name, mtd_node);
res = -ENOENT;
goto out;
}
nr_scparts = scpart_find_partmap(master, &scpart_map);
if (nr_scparts <= 0) {
pr_info("No any partitions was found in 'SC PART MAP'.\n");
res = -ENOENT;
goto free;
}
parts = kcalloc(of_get_child_count(ofpart_node), sizeof(*parts),
GFP_KERNEL);
if (!parts) {
res = -ENOMEM;
goto free;
}
for_each_child_of_node(ofpart_node, pp) {
u32 scpart_id;
if (of_property_read_u32(pp, "sercomm,scpart-id", &scpart_id))
continue;
for (n = 0 ; n < nr_scparts ; n++)
if ((scpart_map[n].part_id != ID_ALREADY_FOUND) &&
(scpart_id == scpart_map[n].part_id))
break;
if (n >= nr_scparts)
			/* no match */
continue;
/* add the partition found in OF into MTD partition array */
parts[nr_parts].offset = scpart_map[n].part_offs;
parts[nr_parts].size = scpart_map[n].part_bytes;
parts[nr_parts].of_node = pp;
if (!of_property_read_string(pp, "label", &partname))
parts[nr_parts].name = partname;
if (of_property_read_bool(pp, "read-only"))
parts[nr_parts].mask_flags |= MTD_WRITEABLE;
if (of_property_read_bool(pp, "lock"))
parts[nr_parts].mask_flags |= MTD_POWERUP_LOCK;
/* mark as 'done' */
scpart_map[n].part_id = ID_ALREADY_FOUND;
nr_parts++;
}
if (nr_parts > 0) {
*pparts = parts;
res = nr_parts;
} else
pr_info("No partition in OF matches partition ID with 'SC PART MAP'.\n");
of_node_put(pp);
free:
of_node_put(ofpart_node);
kfree(scpart_map);
if (res <= 0)
kfree(parts);
out:
return res;
}
static const struct of_device_id scpart_parser_of_match_table[] = {
{ .compatible = "sercomm,sc-partitions" },
{},
};
MODULE_DEVICE_TABLE(of, scpart_parser_of_match_table);
static struct mtd_part_parser scpart_parser = {
.parse_fn = scpart_parse,
.name = "scpart",
.of_match_table = scpart_parser_of_match_table,
};
module_mtd_part_parser(scpart_parser);
/* mtd parsers will request the module by parser name */
MODULE_ALIAS("scpart");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("NOGUCHI Hiroshi <[email protected]>");
MODULE_AUTHOR("Mikhail Zhilkin <[email protected]>");
MODULE_DESCRIPTION("Sercomm partition parser");
| linux-master | drivers/mtd/parsers/scpart.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Parser for TRX format partitions
*
* Copyright (C) 2012 - 2017 Rafał Miłecki <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#define TRX_PARSER_MAX_PARTS 4
/* Magics */
#define TRX_MAGIC 0x30524448
#define UBI_EC_MAGIC 0x23494255 /* UBI# */
struct trx_header {
uint32_t magic;
uint32_t length;
uint32_t crc32;
uint16_t flags;
uint16_t version;
uint32_t offset[3];
} __packed;
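/*
 * The offset[] entries point at the loader, kernel and rootfs images
 * in turn; when no loader is present, the layout shifts down and the
 * trailing entries are zero (see parser_trx_parse() below).
 */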
static const char *parser_trx_data_part_name(struct mtd_info *master,
size_t offset)
{
uint32_t buf;
size_t bytes_read;
int err;
err = mtd_read(master, offset, sizeof(buf), &bytes_read,
(uint8_t *)&buf);
if (err && !mtd_is_bitflip(err)) {
pr_err("mtd_read error while parsing (offset: 0x%zX): %d\n",
offset, err);
goto out_default;
}
if (buf == UBI_EC_MAGIC)
return "ubi";
out_default:
return "rootfs";
}
static int parser_trx_parse(struct mtd_info *mtd,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
struct device_node *np = mtd_get_of_node(mtd);
struct mtd_partition *parts;
struct mtd_partition *part;
struct trx_header trx;
size_t bytes_read;
uint8_t curr_part = 0, i = 0;
uint32_t trx_magic = TRX_MAGIC;
int err;
/* Get different magic from device tree if specified */
err = of_property_read_u32(np, "brcm,trx-magic", &trx_magic);
if (err != 0 && err != -EINVAL)
pr_err("failed to parse \"brcm,trx-magic\" DT attribute, using default: %d\n", err);
parts = kcalloc(TRX_PARSER_MAX_PARTS, sizeof(struct mtd_partition),
GFP_KERNEL);
if (!parts)
return -ENOMEM;
err = mtd_read(mtd, 0, sizeof(trx), &bytes_read, (uint8_t *)&trx);
if (err) {
pr_err("MTD reading error: %d\n", err);
kfree(parts);
return err;
}
if (trx.magic != trx_magic) {
kfree(parts);
return -ENOENT;
}
	/* We have an LZMA loader if there is an address in offset[2] */
if (trx.offset[2]) {
part = &parts[curr_part++];
part->name = "loader";
part->offset = trx.offset[i];
i++;
}
if (trx.offset[i]) {
part = &parts[curr_part++];
part->name = "linux";
part->offset = trx.offset[i];
i++;
}
if (trx.offset[i]) {
part = &parts[curr_part++];
part->name = parser_trx_data_part_name(mtd, trx.offset[i]);
part->offset = trx.offset[i];
i++;
}
	/*
	 * Assume that every partition ends where the next one begins, and
	 * that the last one extends to the end of the device.
	 */
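	/*
	 * Example: offsets 0x1c (loader), 0x1000 (linux) and 0x200000
	 * (rootfs) on an 8 MiB device yield sizes of 0xfe4, 0x1ff000
	 * and 0x600000 respectively.
	 */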
for (i = 0; i < curr_part; i++) {
u64 next_part_offset = (i < curr_part - 1) ?
parts[i + 1].offset : mtd->size;
parts[i].size = next_part_offset - parts[i].offset;
}
*pparts = parts;
return i;
};
static const struct of_device_id mtd_parser_trx_of_match_table[] = {
{ .compatible = "brcm,trx" },
{},
};
MODULE_DEVICE_TABLE(of, mtd_parser_trx_of_match_table);
static struct mtd_part_parser mtd_parser_trx = {
.parse_fn = parser_trx_parse,
.name = "trx",
.of_match_table = mtd_parser_trx_of_match_table,
};
module_mtd_part_parser(mtd_parser_trx);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Parser for TRX format partitions");
| linux-master | drivers/mtd/parsers/parser_trx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright © 2007 Eugene Konev <[email protected]>
*
* TI AR7 flash partition table.
* Based on ar7 map by Felix Fietkau <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <uapi/linux/magic.h>
#define AR7_PARTS 4
#define ROOT_OFFSET 0xe0000
#define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42)
#define LOADER_MAGIC2 le32_to_cpu(0xfeed1281)
struct ar7_bin_rec {
unsigned int checksum;
unsigned int length;
unsigned int address;
};
static int create_mtd_partitions(struct mtd_info *master,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
struct ar7_bin_rec header;
unsigned int offset;
size_t len;
unsigned int pre_size = master->erasesize, post_size = 0;
unsigned int root_offset = ROOT_OFFSET;
int retries = 10;
struct mtd_partition *ar7_parts;
ar7_parts = kcalloc(AR7_PARTS, sizeof(*ar7_parts), GFP_KERNEL);
if (!ar7_parts)
return -ENOMEM;
ar7_parts[0].name = "loader";
ar7_parts[0].offset = 0;
ar7_parts[0].size = master->erasesize;
ar7_parts[0].mask_flags = MTD_WRITEABLE;
ar7_parts[1].name = "config";
ar7_parts[1].offset = 0;
ar7_parts[1].size = master->erasesize;
ar7_parts[1].mask_flags = 0;
do { /* Try 10 blocks starting from master->erasesize */
offset = pre_size;
mtd_read(master, offset, sizeof(header), &len,
(uint8_t *)&header);
if (!strncmp((char *)&header, "TIENV0.8", 8))
ar7_parts[1].offset = pre_size;
if (header.checksum == LOADER_MAGIC1)
break;
if (header.checksum == LOADER_MAGIC2)
break;
pre_size += master->erasesize;
} while (retries--);
pre_size = offset;
if (!ar7_parts[1].offset) {
ar7_parts[1].offset = master->size - master->erasesize;
post_size = master->erasesize;
}
switch (header.checksum) {
case LOADER_MAGIC1:
while (header.length) {
offset += sizeof(header) + header.length;
mtd_read(master, offset, sizeof(header), &len,
(uint8_t *)&header);
}
root_offset = offset + sizeof(header) + 4;
break;
case LOADER_MAGIC2:
while (header.length) {
offset += sizeof(header) + header.length;
mtd_read(master, offset, sizeof(header), &len,
(uint8_t *)&header);
}
root_offset = offset + sizeof(header) + 4 + 0xff;
root_offset &= ~(uint32_t)0xff;
break;
default:
printk(KERN_WARNING "Unknown magic: %08x\n", header.checksum);
break;
}
mtd_read(master, root_offset, sizeof(header), &len, (u8 *)&header);
if (header.checksum != SQUASHFS_MAGIC) {
root_offset += master->erasesize - 1;
root_offset &= ~(master->erasesize - 1);
}
ar7_parts[2].name = "linux";
ar7_parts[2].offset = pre_size;
ar7_parts[2].size = master->size - pre_size - post_size;
ar7_parts[2].mask_flags = 0;
ar7_parts[3].name = "rootfs";
ar7_parts[3].offset = root_offset;
ar7_parts[3].size = master->size - root_offset - post_size;
ar7_parts[3].mask_flags = 0;
*pparts = ar7_parts;
return AR7_PARTS;
}
static struct mtd_part_parser ar7_parser = {
.parse_fn = create_mtd_partitions,
.name = "ar7part",
};
module_mtd_part_parser(ar7_parser);
MODULE_LICENSE("GPL");
MODULE_AUTHOR( "Felix Fietkau <[email protected]>, "
"Eugene Konev <[email protected]>");
MODULE_DESCRIPTION("MTD partitioning for TI AR7");
| linux-master | drivers/mtd/parsers/ar7part.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* BCM63XX CFE image tag parser
*
* Copyright © 2006-2008 Florian Fainelli <[email protected]>
* Mike Albon <[email protected]>
* Copyright © 2009-2010 Daniel Dickinson <[email protected]>
* Copyright © 2011-2013 Jonas Gorski <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bcm963xx_nvram.h>
#include <linux/bcm963xx_tag.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#ifdef CONFIG_MIPS
#include <asm/bootinfo.h>
#include <asm/fw/cfe/cfe_api.h>
#endif /* CONFIG_MIPS */
#define BCM963XX_CFE_BLOCK_SIZE SZ_64K /* always at least 64KiB */
#define BCM963XX_CFE_MAGIC_OFFSET 0x4e0
#define BCM963XX_CFE_VERSION_OFFSET 0x570
#define BCM963XX_NVRAM_OFFSET 0x580
/* Ensure strings read from flash structs are null terminated */
#define STR_NULL_TERMINATE(x) \
do { char *_str = (x); _str[sizeof(x) - 1] = 0; } while (0)
static inline int bcm63xx_detect_cfe(void)
{
int ret = 0;
#ifdef CONFIG_MIPS
ret = (fw_arg3 == CFE_EPTSEAL);
#endif /* CONFIG_MIPS */
return ret;
}
static int bcm63xx_read_nvram(struct mtd_info *master,
struct bcm963xx_nvram *nvram)
{
u32 actual_crc, expected_crc;
size_t retlen;
int ret;
/* extract nvram data */
ret = mtd_read(master, BCM963XX_NVRAM_OFFSET, BCM963XX_NVRAM_V5_SIZE,
&retlen, (void *)nvram);
if (ret)
return ret;
ret = bcm963xx_nvram_checksum(nvram, &expected_crc, &actual_crc);
if (ret)
pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
expected_crc, actual_crc);
if (!nvram->psi_size)
nvram->psi_size = BCM963XX_DEFAULT_PSI_SIZE;
return 0;
}
static const char * const bcm63xx_cfe_part_types[] = {
"bcm963xx-imagetag",
NULL,
};
static int bcm63xx_parse_cfe_nor_partitions(struct mtd_info *master,
const struct mtd_partition **pparts, struct bcm963xx_nvram *nvram)
{
struct mtd_partition *parts;
int nrparts = 3, curpart = 0;
unsigned int cfelen, nvramlen;
unsigned int cfe_erasesize;
int i;
cfe_erasesize = max_t(uint32_t, master->erasesize,
BCM963XX_CFE_BLOCK_SIZE);
cfelen = cfe_erasesize;
nvramlen = nvram->psi_size * SZ_1K;
nvramlen = roundup(nvramlen, cfe_erasesize);
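	/*
	 * Example: psi_size = 24 (KiB) on a flash with 64 KiB erase
	 * blocks gives nvramlen = 24 KiB rounded up to one 64 KiB block.
	 */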
parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
if (!parts)
return -ENOMEM;
/* Start building partition list */
parts[curpart].name = "CFE";
parts[curpart].offset = 0;
parts[curpart].size = cfelen;
curpart++;
parts[curpart].name = "nvram";
parts[curpart].offset = master->size - nvramlen;
parts[curpart].size = nvramlen;
curpart++;
/* Global partition "linux" to make easy firmware upgrade */
parts[curpart].name = "linux";
parts[curpart].offset = cfelen;
parts[curpart].size = master->size - cfelen - nvramlen;
parts[curpart].types = bcm63xx_cfe_part_types;
for (i = 0; i < nrparts; i++)
pr_info("Partition %d is %s offset %llx and length %llx\n", i,
parts[i].name, parts[i].offset, parts[i].size);
*pparts = parts;
return nrparts;
}
static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
struct bcm963xx_nvram *nvram = NULL;
int ret;
if (!bcm63xx_detect_cfe())
return -EINVAL;
nvram = vzalloc(sizeof(*nvram));
if (!nvram)
return -ENOMEM;
ret = bcm63xx_read_nvram(master, nvram);
if (ret)
goto out;
if (!mtd_type_is_nand(master))
ret = bcm63xx_parse_cfe_nor_partitions(master, pparts, nvram);
else
ret = -EINVAL;
out:
vfree(nvram);
return ret;
};
static const struct of_device_id parse_bcm63xx_cfe_match_table[] = {
{ .compatible = "brcm,bcm963xx-cfe-nor-partitions" },
{},
};
MODULE_DEVICE_TABLE(of, parse_bcm63xx_cfe_match_table);
static struct mtd_part_parser bcm63xx_cfe_parser = {
.parse_fn = bcm63xx_parse_cfe_partitions,
.name = "bcm63xxpart",
.of_match_table = parse_bcm63xx_cfe_match_table,
};
module_mtd_part_parser(bcm63xx_cfe_parser);
MODULE_AUTHOR("Daniel Dickinson <[email protected]>");
MODULE_AUTHOR("Florian Fainelli <[email protected]>");
MODULE_AUTHOR("Mike Albon <[email protected]>");
MODULE_AUTHOR("Jonas Gorski <[email protected]");
MODULE_DESCRIPTION("MTD partitioning for BCM63XX CFE bootloaders");
| linux-master | drivers/mtd/parsers/bcm63xxpart.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Qualcomm SMEM NAND flash partition parser
*
* Copyright (C) 2020, Linaro Ltd.
*/
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
#define SMEM_AARM_PARTITION_TABLE 9
#define SMEM_APPS 0
#define SMEM_FLASH_PART_MAGIC1 0x55ee73aa
#define SMEM_FLASH_PART_MAGIC2 0xe35ebddb
#define SMEM_FLASH_PTABLE_V3 3
#define SMEM_FLASH_PTABLE_V4 4
#define SMEM_FLASH_PTABLE_MAX_PARTS_V3 16
#define SMEM_FLASH_PTABLE_MAX_PARTS_V4 48
#define SMEM_FLASH_PTABLE_HDR_LEN (4 * sizeof(u32))
#define SMEM_FLASH_PTABLE_NAME_SIZE 16
/**
* struct smem_flash_pentry - SMEM Flash partition entry
* @name: Name of the partition
* @offset: Offset in blocks
* @length: Length of the partition in blocks
* @attr: Flags for this partition
*/
struct smem_flash_pentry {
char name[SMEM_FLASH_PTABLE_NAME_SIZE];
__le32 offset;
__le32 length;
u8 attr;
} __packed __aligned(4);
/**
* struct smem_flash_ptable - SMEM Flash partition table
* @magic1: Partition table Magic 1
* @magic2: Partition table Magic 2
* @version: Partition table version
* @numparts: Number of partitions in this ptable
* @pentry: Flash partition entries belonging to this ptable
*/
struct smem_flash_ptable {
__le32 magic1;
__le32 magic2;
__le32 version;
__le32 numparts;
struct smem_flash_pentry pentry[SMEM_FLASH_PTABLE_MAX_PARTS_V4];
} __packed __aligned(4);
static int parse_qcomsmem_part(struct mtd_info *mtd,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
int ret, i, j, tmpparts, numparts = 0;
struct smem_flash_pentry *pentry;
struct smem_flash_ptable *ptable;
struct mtd_partition *parts;
char *name, *c;
if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS)
&& mtd->type == MTD_NORFLASH) {
pr_err("%s: SMEM partition parser is incompatible with 4K sectors\n",
mtd->name);
return -EINVAL;
}
pr_debug("Parsing partition table info from SMEM\n");
ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
if (IS_ERR(ptable)) {
if (PTR_ERR(ptable) != -EPROBE_DEFER)
pr_err("Error reading partition table header\n");
return PTR_ERR(ptable);
}
/* Verify ptable magic */
if (le32_to_cpu(ptable->magic1) != SMEM_FLASH_PART_MAGIC1 ||
le32_to_cpu(ptable->magic2) != SMEM_FLASH_PART_MAGIC2) {
pr_err("Partition table magic verification failed\n");
return -EINVAL;
}
/* Ensure that # of partitions is less than the max we have allocated */
tmpparts = le32_to_cpu(ptable->numparts);
if (tmpparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
pr_err("Partition numbers exceed the max limit\n");
return -EINVAL;
}
/* Find out length of partition data based on table version */
if (le32_to_cpu(ptable->version) <= SMEM_FLASH_PTABLE_V3) {
len = SMEM_FLASH_PTABLE_HDR_LEN + SMEM_FLASH_PTABLE_MAX_PARTS_V3 *
sizeof(struct smem_flash_pentry);
} else if (le32_to_cpu(ptable->version) == SMEM_FLASH_PTABLE_V4) {
len = SMEM_FLASH_PTABLE_HDR_LEN + SMEM_FLASH_PTABLE_MAX_PARTS_V4 *
sizeof(struct smem_flash_pentry);
} else {
pr_err("Unknown ptable version (%d)", le32_to_cpu(ptable->version));
return -EINVAL;
}
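	/*
	 * For example (sizes derived from the structs above), a v3 table
	 * is 16 header bytes plus 16 entries of 28 bytes each (the packed,
	 * 4-byte-aligned pentry), i.e. 464 bytes in total.
	 */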
/*
* Now that the partition table header has been parsed, verified
* and the length of the partition table calculated, read the
* complete partition table
*/
ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
if (IS_ERR(ptable)) {
pr_err("Error reading partition table\n");
return PTR_ERR(ptable);
}
for (i = 0; i < tmpparts; i++) {
pentry = &ptable->pentry[i];
if (pentry->name[0] != '\0')
numparts++;
}
parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL);
if (!parts)
return -ENOMEM;
for (i = 0, j = 0; i < tmpparts; i++) {
pentry = &ptable->pentry[i];
if (pentry->name[0] == '\0')
continue;
name = kstrdup(pentry->name, GFP_KERNEL);
if (!name) {
ret = -ENOMEM;
goto out_free_parts;
}
/* Convert name to lower case */
for (c = name; *c != '\0'; c++)
*c = tolower(*c);
parts[j].name = name;
parts[j].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
parts[j].mask_flags = pentry->attr;
parts[j].size = le32_to_cpu(pentry->length) * mtd->erasesize;
pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
i, pentry->name, le32_to_cpu(pentry->offset),
le32_to_cpu(pentry->length), pentry->attr);
j++;
}
pr_debug("SMEM partition table found: ver: %d len: %d\n",
le32_to_cpu(ptable->version), tmpparts);
*pparts = parts;
return numparts;
out_free_parts:
while (--j >= 0)
kfree(parts[j].name);
kfree(parts);
*pparts = NULL;
return ret;
}
static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts,
int nr_parts)
{
int i;
for (i = 0; i < nr_parts; i++)
kfree(pparts[i].name);
kfree(pparts);
}
static const struct of_device_id qcomsmem_of_match_table[] = {
{ .compatible = "qcom,smem-part" },
{},
};
MODULE_DEVICE_TABLE(of, qcomsmem_of_match_table);
static struct mtd_part_parser mtd_parser_qcomsmem = {
.parse_fn = parse_qcomsmem_part,
.cleanup = parse_qcomsmem_cleanup,
.name = "qcomsmem",
.of_match_table = qcomsmem_of_match_table,
};
module_mtd_part_parser(mtd_parser_qcomsmem);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>");
MODULE_DESCRIPTION("Qualcomm SMEM NAND flash partition parser");
| linux-master | drivers/mtd/parsers/qcomsmempart.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Parse RedBoot-style Flash Image System (FIS) tables and
* produce a Linux partition array to match.
*
* Copyright © 2001 Red Hat UK Limited
* Copyright © 2001-2010 David Woodhouse <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/module.h>
struct fis_image_desc {
unsigned char name[16]; // Null terminated name
u32 flash_base; // Address within FLASH of image
u32 mem_base; // Address in memory where it executes
u32 size; // Length of image
u32 entry_point; // Execution entry point
u32 data_length; // Length of actual data
unsigned char _pad[256 - (16 + 7 * sizeof(u32))];
u32 desc_cksum; // Checksum over image descriptor
u32 file_cksum; // Checksum over image data
};
struct fis_list {
struct fis_image_desc *img;
struct fis_list *next;
};
static int directory = CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK;
module_param(directory, int, 0);
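/*
 * A negative value counts back from the end of the device: directory=-1
 * checks the last erase block, while directory=1 checks the second one
 * (see the offset calculation in parse_redboot_partitions() below).
 */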
static inline int redboot_checksum(struct fis_image_desc *img)
{
/* RedBoot doesn't actually write the desc_cksum field yet AFAICT */
return 1;
}
static void parse_redboot_of(struct mtd_info *master)
{
struct device_node *np;
struct device_node *npart;
u32 dirblock;
int ret;
np = mtd_get_of_node(master);
if (!np)
return;
npart = of_get_child_by_name(np, "partitions");
if (!npart)
return;
ret = of_property_read_u32(npart, "fis-index-block", &dirblock);
of_node_put(npart);
if (ret)
return;
/*
* Assign the block found in the device tree to the local
* directory block pointer.
*/
directory = dirblock;
}
static int parse_redboot_partitions(struct mtd_info *master,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
int nrparts = 0;
struct fis_image_desc *buf;
struct mtd_partition *parts;
struct fis_list *fl = NULL, *tmp_fl;
int ret, i;
size_t retlen;
char *names;
char *nullname;
int namelen = 0;
int nulllen = 0;
int numslots;
unsigned long offset;
#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
static char nullstring[] = "unallocated";
#endif
parse_redboot_of(master);
if (directory < 0) {
offset = master->size + directory * master->erasesize;
while (mtd_block_isbad(master, offset)) {
if (!offset) {
nogood:
pr_notice("Failed to find a non-bad block to check for RedBoot partition table\n");
return -EIO;
}
offset -= master->erasesize;
}
} else {
offset = directory * master->erasesize;
while (mtd_block_isbad(master, offset)) {
offset += master->erasesize;
if (offset == master->size)
goto nogood;
}
}
buf = vmalloc(master->erasesize);
if (!buf)
return -ENOMEM;
pr_notice("Searching for RedBoot partition table in %s at offset 0x%lx\n",
master->name, offset);
ret = mtd_read(master, offset, master->erasesize, &retlen,
(void *)buf);
if (ret)
goto out;
if (retlen != master->erasesize) {
ret = -EIO;
goto out;
}
numslots = (master->erasesize / sizeof(struct fis_image_desc));
for (i = 0; i < numslots; i++) {
if (!memcmp(buf[i].name, "FIS directory", 14)) {
/* This is apparently the FIS directory entry for the
* FIS directory itself. The FIS directory size is
* one erase block; if the buf[i].size field is
* swab32(erasesize) then we know we are looking at
* a byte swapped FIS directory - swap all the entries!
* (NOTE: this is 'size' not 'data_length'; size is
* the full size of the entry.)
*/
/* RedBoot can combine the FIS directory and
config partitions into a single eraseblock;
we assume wrong-endian if either the swapped
'size' matches the eraseblock size precisely,
or if the swapped size actually fits in an
eraseblock while the unswapped size doesn't. */
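			/* Example: with a 64KiB eraseblock, a byte-swapped
			   entry stores size 0x00000100; swab32() yields
			   0x00010000, matching the eraseblock size exactly,
			   so the whole table is treated as wrong-endian. */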
if (swab32(buf[i].size) == master->erasesize ||
(buf[i].size > master->erasesize
&& swab32(buf[i].size) < master->erasesize)) {
int j;
/* Update numslots based on actual FIS directory size */
numslots = swab32(buf[i].size) / sizeof(struct fis_image_desc);
for (j = 0; j < numslots; ++j) {
/* A single 0xff denotes a deleted entry.
* Two of them in a row is the end of the table.
*/
if (buf[j].name[0] == 0xff) {
if (buf[j].name[1] == 0xff) {
break;
} else {
continue;
}
}
/* The unsigned long fields were written with the
* wrong byte sex, name and pad have no byte sex.
*/
swab32s(&buf[j].flash_base);
swab32s(&buf[j].mem_base);
swab32s(&buf[j].size);
swab32s(&buf[j].entry_point);
swab32s(&buf[j].data_length);
swab32s(&buf[j].desc_cksum);
swab32s(&buf[j].file_cksum);
}
} else if (buf[i].size < master->erasesize) {
/* Update numslots based on actual FIS directory size */
numslots = buf[i].size / sizeof(struct fis_image_desc);
}
break;
}
}
if (i == numslots) {
/* Didn't find it */
pr_notice("No RedBoot partition table detected in %s\n",
master->name);
ret = 0;
goto out;
}
for (i = 0; i < numslots; i++) {
struct fis_list *new_fl, **prev;
if (buf[i].name[0] == 0xff) {
if (buf[i].name[1] == 0xff) {
break;
} else {
continue;
}
}
if (!redboot_checksum(&buf[i]))
break;
new_fl = kmalloc(sizeof(struct fis_list), GFP_KERNEL);
namelen += strlen(buf[i].name) + 1;
if (!new_fl) {
ret = -ENOMEM;
goto out;
}
new_fl->img = &buf[i];
if (data && data->origin)
buf[i].flash_base -= data->origin;
else
buf[i].flash_base &= master->size - 1;
/* I'm sure the JFFS2 code has done me permanent damage.
* I now think the following is _normal_
*/
prev = &fl;
while (*prev && (*prev)->img->flash_base < new_fl->img->flash_base)
prev = &(*prev)->next;
new_fl->next = *prev;
*prev = new_fl;
nrparts++;
}
#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
if (fl->img->flash_base) {
nrparts++;
nulllen = sizeof(nullstring);
}
for (tmp_fl = fl; tmp_fl->next; tmp_fl = tmp_fl->next) {
if (tmp_fl->img->flash_base + tmp_fl->img->size + master->erasesize <= tmp_fl->next->img->flash_base) {
nrparts++;
nulllen = sizeof(nullstring);
}
}
#endif
parts = kzalloc(sizeof(*parts) * nrparts + nulllen + namelen, GFP_KERNEL);
if (!parts) {
ret = -ENOMEM;
goto out;
}
nullname = (char *)&parts[nrparts];
#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
if (nulllen > 0)
strcpy(nullname, nullstring);
#endif
names = nullname + nulllen;
i = 0;
#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
if (fl->img->flash_base) {
parts[0].name = nullname;
parts[0].size = fl->img->flash_base;
parts[0].offset = 0;
i++;
}
#endif
for ( ; i < nrparts; i++) {
parts[i].size = fl->img->size;
parts[i].offset = fl->img->flash_base;
parts[i].name = names;
strcpy(names, fl->img->name);
#ifdef CONFIG_MTD_REDBOOT_PARTS_READONLY
if (!memcmp(names, "RedBoot", 8) ||
!memcmp(names, "RedBoot config", 15) ||
!memcmp(names, "FIS directory", 14)) {
parts[i].mask_flags = MTD_WRITEABLE;
}
#endif
names += strlen(names) + 1;
#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
if (fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
i++;
parts[i].offset = parts[i - 1].size + parts[i - 1].offset;
parts[i].size = fl->next->img->flash_base - parts[i].offset;
parts[i].name = nullname;
}
#endif
tmp_fl = fl;
fl = fl->next;
kfree(tmp_fl);
}
ret = nrparts;
*pparts = parts;
out:
while (fl) {
struct fis_list *old = fl;
fl = fl->next;
kfree(old);
}
vfree(buf);
return ret;
}
static const struct of_device_id mtd_parser_redboot_of_match_table[] = {
{ .compatible = "redboot-fis" },
{},
};
MODULE_DEVICE_TABLE(of, mtd_parser_redboot_of_match_table);
static struct mtd_part_parser redboot_parser = {
.parse_fn = parse_redboot_partitions,
.name = "RedBoot",
.of_match_table = mtd_parser_redboot_of_match_table,
};
module_mtd_part_parser(redboot_parser);
/* mtd parsers will request the module by parser name */
MODULE_ALIAS("RedBoot");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <[email protected]>");
MODULE_DESCRIPTION("Parsing code for RedBoot Flash Image System (FIS) tables");
| linux-master | drivers/mtd/parsers/redboot.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* BCM63XX CFE image tag parser
*
* Copyright © 2006-2008 Florian Fainelli <[email protected]>
* Mike Albon <[email protected]>
* Copyright © 2009-2010 Daniel Dickinson <[email protected]>
* Copyright © 2011-2013 Jonas Gorski <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bcm963xx_tag.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
/* Ensure strings read from flash structs are null terminated */
#define STR_NULL_TERMINATE(x) \
do { char *_str = (x); _str[sizeof(x) - 1] = 0; } while (0)
static int bcm963xx_read_imagetag(struct mtd_info *master, const char *name,
loff_t tag_offset, struct bcm_tag *buf)
{
int ret;
size_t retlen;
u32 computed_crc;
ret = mtd_read(master, tag_offset, sizeof(*buf), &retlen, (void *)buf);
if (ret)
return ret;
if (retlen != sizeof(*buf))
return -EIO;
computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
offsetof(struct bcm_tag, header_crc));
if (computed_crc == buf->header_crc) {
STR_NULL_TERMINATE(buf->board_id);
STR_NULL_TERMINATE(buf->tag_version);
pr_info("%s: CFE image tag found at 0x%llx with version %s, board type %s\n",
name, tag_offset, buf->tag_version, buf->board_id);
return 0;
}
pr_warn("%s: CFE image tag at 0x%llx CRC invalid (expected %08x, actual %08x)\n",
name, tag_offset, buf->header_crc, computed_crc);
return -EINVAL;
}
static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
/* CFE, NVRAM and global Linux are always present */
int nrparts = 0, curpart = 0;
struct bcm_tag *buf = NULL;
struct mtd_partition *parts;
int ret;
unsigned int rootfsaddr, kerneladdr, spareaddr, offset;
unsigned int rootfslen, kernellen, sparelen, totallen;
int i;
bool rootfs_first = false;
buf = vmalloc(sizeof(struct bcm_tag));
if (!buf)
return -ENOMEM;
/* Get the tag */
ret = bcm963xx_read_imagetag(master, "rootfs", 0, buf);
if (!ret) {
STR_NULL_TERMINATE(buf->flash_image_start);
if (kstrtouint(buf->flash_image_start, 10, &rootfsaddr) ||
rootfsaddr < BCM963XX_EXTENDED_SIZE) {
pr_err("invalid rootfs address: %*ph\n",
(int)sizeof(buf->flash_image_start),
buf->flash_image_start);
ret = -EINVAL;
goto out;
}
STR_NULL_TERMINATE(buf->kernel_address);
if (kstrtouint(buf->kernel_address, 10, &kerneladdr) ||
kerneladdr < BCM963XX_EXTENDED_SIZE) {
pr_err("invalid kernel address: %*ph\n",
(int)sizeof(buf->kernel_address),
buf->kernel_address);
ret = -EINVAL;
goto out;
}
STR_NULL_TERMINATE(buf->kernel_length);
if (kstrtouint(buf->kernel_length, 10, &kernellen)) {
pr_err("invalid kernel length: %*ph\n",
(int)sizeof(buf->kernel_length),
buf->kernel_length);
ret = -EINVAL;
goto out;
}
STR_NULL_TERMINATE(buf->total_length);
if (kstrtouint(buf->total_length, 10, &totallen)) {
pr_err("invalid total length: %*ph\n",
(int)sizeof(buf->total_length),
buf->total_length);
ret = -EINVAL;
goto out;
}
/*
* Addresses are flash absolute, so convert to partition
* relative addresses. Assume either kernel or rootfs will
* directly follow the image tag.
*/
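		/*
		 * Example: a kernel that directly follows the tag at
		 * absolute address 0xBFC10100 gives
		 * offset = 0xBFC10100 - sizeof(struct bcm_tag), so the
		 * partition-relative kernel address is simply
		 * sizeof(struct bcm_tag).
		 */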
if (rootfsaddr < kerneladdr)
offset = rootfsaddr - sizeof(struct bcm_tag);
else
offset = kerneladdr - sizeof(struct bcm_tag);
kerneladdr = kerneladdr - offset;
rootfsaddr = rootfsaddr - offset;
spareaddr = roundup(totallen, master->erasesize);
if (rootfsaddr < kerneladdr) {
/* default Broadcom layout */
rootfslen = kerneladdr - rootfsaddr;
rootfs_first = true;
} else {
/* OpenWrt layout */
rootfsaddr = kerneladdr + kernellen;
rootfslen = spareaddr - rootfsaddr;
}
} else {
goto out;
}
sparelen = master->size - spareaddr;
/* Determine number of partitions */
if (rootfslen > 0)
nrparts++;
if (kernellen > 0)
nrparts++;
parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
if (!parts) {
ret = -ENOMEM;
goto out;
}
/* Start building partition list */
if (kernellen > 0) {
int kernelpart = curpart;
if (rootfslen > 0 && rootfs_first)
kernelpart++;
parts[kernelpart].name = "kernel";
parts[kernelpart].offset = kerneladdr;
parts[kernelpart].size = kernellen;
curpart++;
}
if (rootfslen > 0) {
int rootfspart = curpart;
if (kernellen > 0 && rootfs_first)
rootfspart--;
parts[rootfspart].name = "rootfs";
parts[rootfspart].offset = rootfsaddr;
parts[rootfspart].size = rootfslen;
if (sparelen > 0 && !rootfs_first)
parts[rootfspart].size += sparelen;
curpart++;
}
for (i = 0; i < nrparts; i++)
pr_info("Partition %d is %s offset %llx and length %llx\n", i,
parts[i].name, parts[i].offset, parts[i].size);
pr_info("Spare partition is offset %x and length %x\n", spareaddr,
sparelen);
*pparts = parts;
ret = 0;
out:
vfree(buf);
if (ret)
return ret;
return nrparts;
}
static const struct of_device_id parse_bcm963xx_imagetag_match_table[] = {
{ .compatible = "brcm,bcm963xx-imagetag" },
{},
};
MODULE_DEVICE_TABLE(of, parse_bcm963xx_imagetag_match_table);
static struct mtd_part_parser bcm963xx_imagetag_parser = {
.parse_fn = bcm963xx_parse_imagetag_partitions,
.name = "bcm963xx-imagetag",
.of_match_table = parse_bcm963xx_imagetag_match_table,
};
module_mtd_part_parser(bcm963xx_imagetag_parser);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Dickinson <[email protected]>");
MODULE_AUTHOR("Florian Fainelli <[email protected]>");
MODULE_AUTHOR("Mike Albon <[email protected]>");
MODULE_AUTHOR("Jonas Gorski <[email protected]>");
MODULE_DESCRIPTION("MTD parser for BCM963XX CFE Image Tag partitions");
| linux-master | drivers/mtd/parsers/parser_imagetag.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Read flash partition table from command line
*
* Copyright © 2002 SYSGO Real-Time Solutions GmbH
* Copyright © 2002-2010 David Woodhouse <[email protected]>
*
* The format for the command line is as follows:
*
* mtdparts=<mtddef>[;<mtddef>]
* <mtddef> := <mtd-id>:<partdef>[,<partdef>]
* <partdef> := <size>[@<offset>][<name>][ro][lk][slc]
* <mtd-id> := unique name used in mapping driver/device (mtd->name)
* <size> := standard linux memsize OR "-" to denote all remaining space
* size is automatically truncated at end of device
* if specified or truncated size is 0 the part is skipped
* <offset> := standard linux memsize
* if omitted the part will immediately follow the previous part
* or 0 if the first part
* <name> := '(' NAME ')'
* NAME will appear in /proc/mtd
*
* <size> and <offset> can be specified such that the parts are out of order
* in physical memory and may even overlap.
*
* The parts are assigned MTD numbers in the order they are specified in the
* command line regardless of their order in physical memory.
*
* Examples:
*
* 1 NOR Flash, with 1 single writable partition:
* edb7312-nor:-
*
* 1 NOR Flash with 2 partitions, 1 NAND with one
* edb7312-nor:256k(ARMboot)ro,-(root);edb7312-nand:-(home)
*/
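/*
 * A worked example (values illustrative):
 *
 * mtdparts=phys:512k(boot)ro,512k@1M(env),-(rootfs)
 *
 * creates on the MTD named "phys":
 * "boot" at offset 0, size 512 KiB, read-only
 * "env" at offset 1 MiB, size 512 KiB
 * "rootfs" at offset 1.5 MiB, filling the rest of the device
 */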
#define pr_fmt(fmt) "mtd: " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/module.h>
#include <linux/err.h>
/* debug macro */
#if 0
#define dbg(x) do { printk("DEBUG-CMDLINE-PART: "); printk x; } while(0)
#else
#define dbg(x)
#endif
/* special size referring to all the remaining space in a partition */
#define SIZE_REMAINING ULLONG_MAX
#define OFFSET_CONTINUOUS ULLONG_MAX
struct cmdline_mtd_partition {
struct cmdline_mtd_partition *next;
char *mtd_id;
int num_parts;
struct mtd_partition *parts;
};
/* mtdpart_setup() parses into here */
static struct cmdline_mtd_partition *partitions;
/* the command line passed to mtdpart_setup() */
static char *mtdparts;
static char *cmdline;
static int cmdline_parsed;
/*
* Parse one partition definition for an MTD. Since there can be many
* comma separated partition definitions, this function calls itself
* recursively until no more partition definitions are found. Nice side
* effect: the memory to keep the mtd_partition structs and the names
* is allocated upon the last definition being found. At that point the
* syntax has been verified ok.
*/
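/*
 * Sketch of the single allocation made by the deepest recursion level:
 *
 * [ mtd_partition[0..n-1] | name strings | caller's extra memory ]
 *
 * Each level copies its partition name into the tail as the recursion
 * unwinds (so the last partition's name is written first), and the
 * top-level caller receives the remaining tail via extra_mem_ptr.
 */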
static struct mtd_partition * newpart(char *s,
char **retptr,
int *num_parts,
int this_part,
unsigned char **extra_mem_ptr,
int extra_mem_size)
{
struct mtd_partition *parts;
unsigned long long size, offset = OFFSET_CONTINUOUS;
char *name;
int name_len;
unsigned char *extra_mem;
char delim;
unsigned int mask_flags, add_flags;
/* fetch the partition size */
if (*s == '-') {
/* assign all remaining space to this partition */
size = SIZE_REMAINING;
s++;
} else {
size = memparse(s, &s);
if (!size) {
pr_err("partition has size 0\n");
return ERR_PTR(-EINVAL);
}
}
/* fetch partition name and flags */
mask_flags = 0; /* this is going to be a regular partition */
add_flags = 0;
delim = 0;
/* check for offset */
if (*s == '@') {
s++;
offset = memparse(s, &s);
}
/* now look for name */
if (*s == '(')
delim = ')';
if (delim) {
char *p;
name = ++s;
p = strchr(name, delim);
if (!p) {
pr_err("no closing %c found in partition name\n", delim);
return ERR_PTR(-EINVAL);
}
name_len = p - name;
s = p + 1;
} else {
name = NULL;
name_len = 13; /* Partition_000 */
}
/* record name length for memory allocation later */
extra_mem_size += name_len + 1;
/* test for options */
if (strncmp(s, "ro", 2) == 0) {
mask_flags |= MTD_WRITEABLE;
s += 2;
}
/* if lk is found do NOT unlock the MTD partition*/
if (strncmp(s, "lk", 2) == 0) {
mask_flags |= MTD_POWERUP_LOCK;
s += 2;
}
/* if slc is found use emulated SLC mode on this partition*/
if (!strncmp(s, "slc", 3)) {
add_flags |= MTD_SLC_ON_MLC_EMULATION;
s += 3;
}
/* test if more partitions are following */
if (*s == ',') {
if (size == SIZE_REMAINING) {
pr_err("no partitions allowed after a fill-up partition\n");
return ERR_PTR(-EINVAL);
}
/* more partitions follow, parse them */
parts = newpart(s + 1, &s, num_parts, this_part + 1,
&extra_mem, extra_mem_size);
if (IS_ERR(parts))
return parts;
} else {
/* this is the last partition: allocate space for all */
int alloc_size;
*num_parts = this_part + 1;
alloc_size = *num_parts * sizeof(struct mtd_partition) +
extra_mem_size;
parts = kzalloc(alloc_size, GFP_KERNEL);
if (!parts)
return ERR_PTR(-ENOMEM);
extra_mem = (unsigned char *)(parts + *num_parts);
}
/*
* enter this partition (offset will be calculated later if it is
* OFFSET_CONTINUOUS at this point)
*/
parts[this_part].size = size;
parts[this_part].offset = offset;
parts[this_part].mask_flags = mask_flags;
parts[this_part].add_flags = add_flags;
if (name)
strscpy(extra_mem, name, name_len + 1);
else
sprintf(extra_mem, "Partition_%03d", this_part);
parts[this_part].name = extra_mem;
extra_mem += name_len + 1;
dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
this_part, parts[this_part].name, parts[this_part].offset,
parts[this_part].size, parts[this_part].mask_flags));
/* return (updated) pointer to extra_mem memory */
if (extra_mem_ptr)
*extra_mem_ptr = extra_mem;
/* return (updated) pointer to the command line string */
*retptr = s;
/* return partition table */
return parts;
}
/*
* Parse the command line.
*/
static int mtdpart_setup_real(char *s)
{
cmdline_parsed = 1;
for( ; s != NULL; )
{
struct cmdline_mtd_partition *this_mtd;
struct mtd_partition *parts;
int mtd_id_len, num_parts;
char *p, *mtd_id, *semicol, *open_parenth;
/*
* Replace the first ';' by a NULL char so strrchr can work
* properly.
*/
semicol = strchr(s, ';');
if (semicol)
*semicol = '\0';
/*
* make sure that part names containing ':' are not mistaken
* for part of the mtd-id
*/
open_parenth = strchr(s, '(');
if (open_parenth)
*open_parenth = '\0';
mtd_id = s;
/*
* fetch <mtd-id>. We use strrchr to ignore all ':' that could
* be present in the MTD name, only the last one is interpreted
* as an <mtd-id>/<part-definition> separator.
*/
p = strrchr(s, ':');
/* Restore the '(' now. */
if (open_parenth)
*open_parenth = '(';
/* Restore the ';' now. */
if (semicol)
*semicol = ';';
if (!p) {
pr_err("no mtd-id\n");
return -EINVAL;
}
mtd_id_len = p - mtd_id;
dbg(("parsing <%s>\n", p+1));
/*
* parse one mtd. have it reserve memory for the
* struct cmdline_mtd_partition and the mtd-id string.
*/
parts = newpart(p + 1, /* cmdline */
&s, /* out: updated cmdline ptr */
&num_parts, /* out: number of parts */
0, /* first partition */
(unsigned char**)&this_mtd, /* out: extra mem */
mtd_id_len + 1 + sizeof(*this_mtd) +
sizeof(void*)-1 /*alignment*/);
if (IS_ERR(parts)) {
/*
* An error occurred. We're either:
* a) out of memory, or
* b) in the middle of the partition spec
* Either way, this mtd is hosed and we're
* unlikely to succeed in parsing any more
*/
return PTR_ERR(parts);
}
/* align this_mtd */
this_mtd = (struct cmdline_mtd_partition *)
ALIGN((unsigned long)this_mtd, sizeof(void *));
/* enter results */
this_mtd->parts = parts;
this_mtd->num_parts = num_parts;
this_mtd->mtd_id = (char*)(this_mtd + 1);
strscpy(this_mtd->mtd_id, mtd_id, mtd_id_len + 1);
/* link into chain */
this_mtd->next = partitions;
partitions = this_mtd;
dbg(("mtdid=<%s> num_parts=<%d>\n",
this_mtd->mtd_id, this_mtd->num_parts));
/* EOS - we're done */
if (*s == 0)
break;
/* does another spec follow? */
if (*s != ';') {
pr_err("bad character after partition (%c)\n", *s);
return -EINVAL;
}
s++;
}
return 0;
}
/*
* Main function to be called from the MTD mapping driver/device to
* obtain the partitioning information. At this point the command line
* arguments will actually be parsed and turned to struct mtd_partition
* information. It returns partitions for the requested mtd device, or
* the first one in the chain if a NULL mtd_id is passed in.
*/
static int parse_cmdline_partitions(struct mtd_info *master,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
unsigned long long offset;
int i, err;
struct cmdline_mtd_partition *part;
const char *mtd_id = master->name;
/* parse command line */
if (!cmdline_parsed) {
err = mtdpart_setup_real(cmdline);
if (err)
return err;
}
/*
* Search for the partition definition matching master->name.
* If master->name is not set, stop at first partition definition.
*/
for (part = partitions; part; part = part->next) {
if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id)))
break;
}
if (!part)
return 0;
for (i = 0, offset = 0; i < part->num_parts; i++) {
if (part->parts[i].offset == OFFSET_CONTINUOUS)
part->parts[i].offset = offset;
else
offset = part->parts[i].offset;
if (part->parts[i].size == SIZE_REMAINING)
part->parts[i].size = master->size - offset;
if (offset + part->parts[i].size > master->size) {
pr_warn("%s: partitioning exceeds flash size, truncating\n",
part->mtd_id);
part->parts[i].size = master->size - offset;
}
offset += part->parts[i].size;
if (part->parts[i].size == 0) {
pr_warn("%s: skipping zero sized partition\n",
part->mtd_id);
part->num_parts--;
memmove(&part->parts[i], &part->parts[i + 1],
sizeof(*part->parts) * (part->num_parts - i));
i--;
}
}
*pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts,
GFP_KERNEL);
if (!*pparts)
return -ENOMEM;
return part->num_parts;
}
/*
* This is the handler for our kernel parameter, called from
* main.c::checksetup(). Note that we can not yet kmalloc() anything,
* so we only save the commandline for later processing.
*
* This function needs to be visible for bootloaders.
*/
static int __init mtdpart_setup(char *s)
{
cmdline = s;
return 1;
}
__setup("mtdparts=", mtdpart_setup);
static struct mtd_part_parser cmdline_parser = {
.parse_fn = parse_cmdline_partitions,
.name = "cmdlinepart",
};
static int __init cmdline_parser_init(void)
{
if (mtdparts)
mtdpart_setup(mtdparts);
register_mtd_parser(&cmdline_parser);
return 0;
}
static void __exit cmdline_parser_exit(void)
{
deregister_mtd_parser(&cmdline_parser);
}
module_init(cmdline_parser_init);
module_exit(cmdline_parser_exit);
MODULE_PARM_DESC(mtdparts, "Partitioning specification");
module_param(mtdparts, charp, 0);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marius Groeger <[email protected]>");
MODULE_DESCRIPTION("Command line configuration of MTD partitions");
| linux-master | drivers/mtd/parsers/cmdlinepart.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* BCM47XX MTD partitioning
*
* Copyright © 2012 Rafał Miłecki <[email protected]>
*/
#include <linux/bcm47xx_nvram.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <uapi/linux/magic.h>
/*
* NAND flash on Netgear R6250 was verified to contain 15 partitions.
* This will result in allocating a bigger array than needed for some old
* devices, but the memory will be freed soon anyway (see mtd_device_parse_register).
*/
#define BCM47XXPART_MAX_PARTS 20
/*
* Amount of bytes we read when analyzing each block of flash memory.
* Set it big enough to allow detecting partition and reading important data.
*/
#define BCM47XXPART_BYTES_TO_READ 0x4e8
/* Magics */
#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */
#define BOARD_DATA_MAGIC2 0xBD0D0BBD
#define CFE_MAGIC 0x43464531 /* 1EFC */
#define FACTORY_MAGIC 0x59544346 /* FCTY */
#define NVRAM_HEADER 0x48534C46 /* FLSH */
#define POT_MAGIC1 0x54544f50 /* POTT */
#define POT_MAGIC2 0x504f /* OP */
#define ML_MAGIC1 0x39685a42
#define ML_MAGIC2 0x26594131
#define TRX_MAGIC 0x30524448
#define SHSQ_MAGIC 0x71736873 /* shsq (weird ZTE H218N endianness) */
static const char * const trx_types[] = { "trx", NULL };
struct trx_header {
uint32_t magic;
uint32_t length;
uint32_t crc32;
uint16_t flags;
uint16_t version;
uint32_t offset[3];
} __packed;
static void bcm47xxpart_add_part(struct mtd_partition *part, const char *name,
u64 offset, uint32_t mask_flags)
{
part->name = name;
part->offset = offset;
part->mask_flags = mask_flags;
}
/**
* bcm47xxpart_bootpartition - gets index of TRX partition used by bootloader
*
* Some devices may have more than one TRX partition. In such a case one of
* them is the main one and the other a failsafe one. The bootloader may fall
* back to the failsafe firmware if it detects corruption of the main image.
*
* This function provides info about the currently used TRX partition, i.e. the
* one containing the kernel started by the bootloader.
*/
static int bcm47xxpart_bootpartition(void)
{
char buf[4];
int bootpartition;
/* Check CFE environment variable */
if (bcm47xx_nvram_getenv("bootpartition", buf, sizeof(buf)) > 0) {
if (!kstrtoint(buf, 0, &bootpartition))
return bootpartition;
}
return 0;
}
static int bcm47xxpart_parse(struct mtd_info *master,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
struct mtd_partition *parts;
uint8_t i, curr_part = 0;
uint32_t *buf;
size_t bytes_read;
uint32_t offset;
uint32_t blocksize = master->erasesize;
int trx_parts[2]; /* Array with indexes of TRX partitions */
int trx_num = 0; /* Number of found TRX partitions */
int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
int err;
/*
* Some really old flashes (like AT45DB*) had smaller erase sizes, but
* partitions were aligned to at least 0x1000 anyway.
*/
if (blocksize < 0x1000)
blocksize = 0x1000;
/* Alloc */
parts = kcalloc(BCM47XXPART_MAX_PARTS, sizeof(struct mtd_partition),
GFP_KERNEL);
if (!parts)
return -ENOMEM;
buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
if (!buf) {
kfree(parts);
return -ENOMEM;
}
/* Parse block by block looking for magics */
for (offset = 0; offset <= master->size - blocksize;
offset += blocksize) {
/* Nothing more in higher memory on BCM47XX (MIPS) */
if (IS_ENABLED(CONFIG_BCM47XX) && offset >= 0x2000000)
break;
if (curr_part >= BCM47XXPART_MAX_PARTS) {
pr_warn("Reached maximum number of partitions, scanning stopped!\n");
break;
}
/* Read beginning of the block */
err = mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
&bytes_read, (uint8_t *)buf);
if (err && !mtd_is_bitflip(err)) {
pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
offset, err);
continue;
}
/* Magic or small NVRAM at 0x400 */
if ((buf[0x4e0 / 4] == CFE_MAGIC && buf[0x4e4 / 4] == CFE_MAGIC) ||
(buf[0x400 / 4] == NVRAM_HEADER)) {
bcm47xxpart_add_part(&parts[curr_part++], "boot",
offset, MTD_WRITEABLE);
continue;
}
/*
* board_data starts with board_id which differs across boards,
* but we can use 'MPFR' (hopefully) magic at 0x100
*/
if (buf[0x100 / 4] == BOARD_DATA_MAGIC) {
bcm47xxpart_add_part(&parts[curr_part++], "board_data",
offset, MTD_WRITEABLE);
continue;
}
/* Found on Huawei E970 */
if (buf[0x000 / 4] == FACTORY_MAGIC) {
bcm47xxpart_add_part(&parts[curr_part++], "factory",
offset, MTD_WRITEABLE);
continue;
}
/* POT(TOP) */
if (buf[0x000 / 4] == POT_MAGIC1 &&
(buf[0x004 / 4] & 0xFFFF) == POT_MAGIC2) {
bcm47xxpart_add_part(&parts[curr_part++], "POT", offset,
MTD_WRITEABLE);
continue;
}
/* ML */
if (buf[0x010 / 4] == ML_MAGIC1 &&
buf[0x014 / 4] == ML_MAGIC2) {
bcm47xxpart_add_part(&parts[curr_part++], "ML", offset,
MTD_WRITEABLE);
continue;
}
/* TRX */
if (buf[0x000 / 4] == TRX_MAGIC) {
struct trx_header *trx;
uint32_t last_subpart;
uint32_t trx_size;
if (trx_num >= ARRAY_SIZE(trx_parts))
pr_warn("No enough space to store another TRX found at 0x%X\n",
offset);
else
trx_parts[trx_num++] = curr_part;
bcm47xxpart_add_part(&parts[curr_part++], "firmware",
offset, 0);
/*
* Try to find the TRX size. The "length" field isn't fully
* reliable as it could be decreased to make CRC32 cover
* only part of the TRX data. This is commonly done because the
* checksum can't cover e.g. an ever-changing rootfs partition.
* Use the subpart offsets to derive a minimum TRX size.
*/
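/*
 * Illustrative example: subpart offsets {28, 1000, 0}, length 900
 * and a 64 KiB blocksize give trx_size = max(900, 1000 + 0x10000).
 */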
trx = (struct trx_header *)buf;
last_subpart = max3(trx->offset[0], trx->offset[1],
trx->offset[2]);
trx_size = max(trx->length, last_subpart + blocksize);
/*
* Skip the TRX data. Decrease offset by block size as
* the next loop iteration will increase it.
*/
offset += roundup(trx_size, blocksize) - blocksize;
continue;
}
/* Squashfs on devices not using TRX */
if (le32_to_cpu(buf[0x000 / 4]) == SQUASHFS_MAGIC ||
buf[0x000 / 4] == SHSQ_MAGIC) {
bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
offset, 0);
continue;
}
/*
* New (ARM?) devices may have NVRAM in some middle block. Last
* block will be checked later, so skip it.
*/
if (offset != master->size - blocksize &&
buf[0x000 / 4] == NVRAM_HEADER) {
bcm47xxpart_add_part(&parts[curr_part++], "nvram",
offset, 0);
continue;
}
/* Read middle of the block */
err = mtd_read(master, offset + (blocksize / 2), 0x4, &bytes_read,
(uint8_t *)buf);
if (err && !mtd_is_bitflip(err)) {
pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
offset + (blocksize / 2), err);
continue;
}
/* Some devices (ex. WNDR3700v3) don't have a standard 'MPFR' */
if (buf[0x000 / 4] == BOARD_DATA_MAGIC2) {
bcm47xxpart_add_part(&parts[curr_part++], "board_data",
offset, MTD_WRITEABLE);
continue;
}
}
/* Look for NVRAM at the end of the last block. */
for (i = 0; i < ARRAY_SIZE(possible_nvram_sizes); i++) {
if (curr_part >= BCM47XXPART_MAX_PARTS) {
pr_warn("Reached maximum number of partitions, scanning stopped!\n");
break;
}
offset = master->size - possible_nvram_sizes[i];
err = mtd_read(master, offset, 0x4, &bytes_read,
(uint8_t *)buf);
if (err && !mtd_is_bitflip(err)) {
pr_err("mtd_read error while reading (offset 0x%X): %d\n",
offset, err);
continue;
}
/* Standard NVRAM */
if (buf[0] == NVRAM_HEADER) {
bcm47xxpart_add_part(&parts[curr_part++], "nvram",
master->size - blocksize, 0);
break;
}
}
kfree(buf);
/*
* Assume that partitions end at the beginning of the one they are
* followed by.
*/
for (i = 0; i < curr_part; i++) {
u64 next_part_offset = (i < curr_part - 1) ?
parts[i + 1].offset : master->size;
parts[i].size = next_part_offset - parts[i].offset;
}
/* If there was TRX parse it now */
for (i = 0; i < trx_num; i++) {
struct mtd_partition *trx = &parts[trx_parts[i]];
if (i == bcm47xxpart_bootpartition())
trx->types = trx_types;
else
trx->name = "failsafe";
}
*pparts = parts;
return curr_part;
};
static const struct of_device_id bcm47xxpart_of_match_table[] = {
{ .compatible = "brcm,bcm947xx-cfe-partitions" },
{},
};
MODULE_DEVICE_TABLE(of, bcm47xxpart_of_match_table);
static struct mtd_part_parser bcm47xxpart_mtd_parser = {
.parse_fn = bcm47xxpart_parse,
.name = "bcm47xxpart",
.of_match_table = bcm47xxpart_of_match_table,
};
module_mtd_part_parser(bcm47xxpart_mtd_parser);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MTD partitioning for BCM47XX flash memories");
| linux-master | drivers/mtd/parsers/bcm47xxpart.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2022 Rafał Miłecki <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#define BRCM_U_BOOT_MAX_OFFSET 0x200000
#define BRCM_U_BOOT_STEP 0x1000
#define BRCM_U_BOOT_MAX_PARTS 2
#define BRCM_U_BOOT_MAGIC 0x75456e76 /* uEnv */
struct brcm_u_boot_header {
__le32 magic;
__le32 length;
} __packed;
static const char *names[BRCM_U_BOOT_MAX_PARTS] = {
"u-boot-env",
"u-boot-env-backup",
};
static int brcm_u_boot_parse(struct mtd_info *mtd,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
struct brcm_u_boot_header header;
struct mtd_partition *parts;
size_t bytes_read;
size_t offset;
int err;
int i = 0;
parts = kcalloc(BRCM_U_BOOT_MAX_PARTS, sizeof(*parts), GFP_KERNEL);
if (!parts)
return -ENOMEM;
for (offset = 0;
offset < min_t(size_t, mtd->size, BRCM_U_BOOT_MAX_OFFSET);
offset += BRCM_U_BOOT_STEP) {
err = mtd_read(mtd, offset, sizeof(header), &bytes_read, (uint8_t *)&header);
if (err && !mtd_is_bitflip(err)) {
pr_err("Failed to read from %s at 0x%zx: %d\n", mtd->name, offset, err);
continue;
}
if (le32_to_cpu(header.magic) != BRCM_U_BOOT_MAGIC)
continue;
parts[i].name = names[i];
parts[i].offset = offset;
parts[i].size = sizeof(header) + le32_to_cpu(header.length);
i++;
pr_info("offset:0x%zx magic:0x%08x BINGO\n", offset, header.magic);
if (i == BRCM_U_BOOT_MAX_PARTS)
break;
}
*pparts = parts;
return i;
};
static const struct of_device_id brcm_u_boot_of_match_table[] = {
{ .compatible = "brcm,u-boot" },
{},
};
MODULE_DEVICE_TABLE(of, brcm_u_boot_of_match_table);
static struct mtd_part_parser brcm_u_boot_mtd_parser = {
.parse_fn = brcm_u_boot_parse,
.name = "brcm_u-boot",
.of_match_table = brcm_u_boot_of_match_table,
};
module_mtd_part_parser(brcm_u_boot_mtd_parser);
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/parsers/brcm_u-boot.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*======================================================================
drivers/mtd/afs.c: ARM Flash Layout/Partitioning
Copyright © 2000 ARM Limited
Copyright (C) 2019 Linus Walleij
This is access code for flashes using ARM's flash partitioning
standards.
======================================================================*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#define AFSV1_FOOTER_MAGIC 0xA0FFFF9F
#define AFSV2_FOOTER_MAGIC1 0x464C5348 /* "FLSH" */
#define AFSV2_FOOTER_MAGIC2 0x464F4F54 /* "FOOT" */
struct footer_v1 {
u32 image_info_base; /* Address of first word of ImageFooter */
u32 image_start; /* Start of area reserved by this footer */
u32 signature; /* 'Magic' number proves it's a footer */
u32 type; /* Area type: ARM Image, SIB, customer */
u32 checksum; /* Just this structure */
};
struct image_info_v1 {
u32 bootFlags; /* Boot flags, compression etc. */
u32 imageNumber; /* Unique number, selects for boot etc. */
u32 loadAddress; /* Address program should be loaded to */
u32 length; /* Actual size of image */
u32 address; /* Image is executed from here */
char name[16]; /* Null terminated */
u32 headerBase; /* Flash Address of any stripped header */
u32 header_length; /* Length of header in memory */
u32 headerType; /* AIF, RLF, s-record etc. */
u32 checksum; /* Image checksum (inc. this struct) */
};
static u32 word_sum(void *words, int num)
{
u32 *p = words;
u32 sum = 0;
while (num--)
sum += *p++;
return sum;
}
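/*
 * Ones'-complement sum over @num words: whenever an addition would
 * wrap past 2^32, the carry is folded back in (end-around carry).
 * The complement is returned, so a block whose stored checksum is
 * correct sums to 0.
 */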
static u32 word_sum_v2(u32 *p, u32 num)
{
u32 sum = 0;
int i;
for (i = 0; i < num; i++) {
u32 val;
val = p[i];
if (val > ~sum)
sum++;
sum += val;
}
return ~sum;
}
static bool afs_is_v1(struct mtd_info *mtd, u_int off)
{
/* The magic is 12 bytes from the end of the erase block */
u_int ptr = off + mtd->erasesize - 12;
u32 magic;
size_t sz;
int ret;
ret = mtd_read(mtd, ptr, 4, &sz, (u_char *)&magic);
if (ret < 0) {
printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
ptr, ret);
return false;
}
if (ret >= 0 && sz != 4)
return false;
return (magic == AFSV1_FOOTER_MAGIC);
}
static bool afs_is_v2(struct mtd_info *mtd, u_int off)
{
/* The magic is the 8 last bytes of the erase block */
u_int ptr = off + mtd->erasesize - 8;
u32 foot[2];
size_t sz;
int ret;
ret = mtd_read(mtd, ptr, 8, &sz, (u_char *)foot);
if (ret < 0) {
printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
ptr, ret);
return false;
}
if (ret >= 0 && sz != 8)
return false;
return (foot[0] == AFSV2_FOOTER_MAGIC1 &&
foot[1] == AFSV2_FOOTER_MAGIC2);
}
static int afs_parse_v1_partition(struct mtd_info *mtd,
u_int off, struct mtd_partition *part)
{
struct footer_v1 fs;
struct image_info_v1 iis;
u_int mask;
/*
* Static checks cannot see that we bail out if we have an error
* reading the footer.
*/
u_int iis_ptr;
u_int img_ptr;
u_int ptr;
size_t sz;
int ret;
int i;
/*
* This is the address mask; we use this to mask off out of
* range address bits.
*/
mask = mtd->size - 1;
ptr = off + mtd->erasesize - sizeof(fs);
ret = mtd_read(mtd, ptr, sizeof(fs), &sz, (u_char *)&fs);
if (ret >= 0 && sz != sizeof(fs))
ret = -EINVAL;
if (ret < 0) {
printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
ptr, ret);
return ret;
}
/*
* Check the checksum.
*/
if (word_sum(&fs, sizeof(fs) / sizeof(u32)) != 0xffffffff)
return -EINVAL;
/*
* Hide the SIB (System Information Block)
*/
if (fs.type == 2)
return 0;
iis_ptr = fs.image_info_base & mask;
img_ptr = fs.image_start & mask;
/*
* Check the image info base. This can not
* be located after the footer structure.
*/
if (iis_ptr >= ptr)
return 0;
/*
* Check the start of this image. The image
* data can not be located after this block.
*/
if (img_ptr > off)
return 0;
/* Read the image info block */
memset(&iis, 0, sizeof(iis));
ret = mtd_read(mtd, iis_ptr, sizeof(iis), &sz, (u_char *)&iis);
if (ret < 0) {
printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
iis_ptr, ret);
return -EINVAL;
}
if (sz != sizeof(iis))
return -EINVAL;
/*
* Validate the name - it must be NUL terminated.
*/
for (i = 0; i < sizeof(iis.name); i++)
if (iis.name[i] == '\0')
break;
if (i == sizeof(iis.name))
return -EINVAL;
part->name = kstrdup(iis.name, GFP_KERNEL);
if (!part->name)
return -ENOMEM;
part->size = (iis.length + mtd->erasesize - 1) & ~(mtd->erasesize - 1);
part->offset = img_ptr;
part->mask_flags = 0;
printk(" mtd: at 0x%08x, %5lluKiB, %8u, %s\n",
img_ptr, part->size / 1024,
iis.imageNumber, part->name);
return 0;
}
static int afs_parse_v2_partition(struct mtd_info *mtd,
u_int off, struct mtd_partition *part)
{
u_int ptr;
u32 footer[12];
u32 imginfo[36];
char *name;
u32 version;
u32 entrypoint;
u32 attributes;
u32 region_count;
u32 block_start;
u32 block_end;
u32 crc;
size_t sz;
int ret;
int i;
int pad = 0;
pr_debug("Parsing v2 partition @%08x-%08x\n",
off, off + mtd->erasesize);
/* First read the footer */
ptr = off + mtd->erasesize - sizeof(footer);
ret = mtd_read(mtd, ptr, sizeof(footer), &sz, (u_char *)footer);
if ((ret < 0) || (ret >= 0 && sz != sizeof(footer))) {
pr_err("AFS: mtd read failed at 0x%x: %d\n",
ptr, ret);
return -EIO;
}
name = (char *) &footer[0];
version = footer[9];
ptr = off + mtd->erasesize - sizeof(footer) - footer[8];
pr_debug("found image \"%s\", version %08x, info @%08x\n",
name, version, ptr);
/* Then read the image information */
ret = mtd_read(mtd, ptr, sizeof(imginfo), &sz, (u_char *)imginfo);
if ((ret < 0) || (ret >= 0 && sz != sizeof(imginfo))) {
pr_err("AFS: mtd read failed at 0x%x: %d\n",
ptr, ret);
return -EIO;
}
/* 32bit platforms have 4 bytes padding */
crc = word_sum_v2(&imginfo[1], 34);
if (!crc) {
pr_debug("Padding 1 word (4 bytes)\n");
pad = 1;
} else {
/* 64bit platforms have 8 bytes padding */
crc = word_sum_v2(&imginfo[2], 34);
if (!crc) {
pr_debug("Padding 2 words (8 bytes)\n");
pad = 2;
}
}
if (crc) {
pr_err("AFS: bad checksum on v2 image info: %08x\n", crc);
return -EINVAL;
}
entrypoint = imginfo[pad];
attributes = imginfo[pad+1];
region_count = imginfo[pad+2];
block_start = imginfo[20];
block_end = imginfo[21];
pr_debug("image entry=%08x, attr=%08x, regions=%08x, "
"bs=%08x, be=%08x\n",
entrypoint, attributes, region_count,
block_start, block_end);
for (i = 0; i < region_count; i++) {
u32 region_load_addr = imginfo[pad + 3 + i*4];
u32 region_size = imginfo[pad + 4 + i*4];
u32 region_offset = imginfo[pad + 5 + i*4];
u32 region_start;
u32 region_end;
pr_debug(" region %d: address: %08x, size: %08x, "
"offset: %08x\n",
i,
region_load_addr,
region_size,
region_offset);
region_start = off + region_offset;
region_end = region_start + region_size;
/* Align partition to end of erase block */
region_end += (mtd->erasesize - 1);
region_end &= ~(mtd->erasesize - 1);
pr_debug(" partition start = %08x, partition end = %08x\n",
region_start, region_end);
/* Create one partition per region */
part->name = kstrdup(name, GFP_KERNEL);
if (!part->name)
return -ENOMEM;
part->offset = region_start;
part->size = region_end - region_start;
part->mask_flags = 0;
}
return 0;
}
static int parse_afs_partitions(struct mtd_info *mtd,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
struct mtd_partition *parts;
u_int off, sz;
int ret = 0;
int i;
/* Count the partitions by looping over all erase blocks */
for (i = off = sz = 0; off < mtd->size; off += mtd->erasesize) {
if (afs_is_v1(mtd, off)) {
sz += sizeof(struct mtd_partition);
i += 1;
}
if (afs_is_v2(mtd, off)) {
sz += sizeof(struct mtd_partition);
i += 1;
}
}
if (!i)
return 0;
parts = kzalloc(sz, GFP_KERNEL);
if (!parts)
return -ENOMEM;
/*
* Identify the partitions
*/
for (i = off = 0; off < mtd->size; off += mtd->erasesize) {
if (afs_is_v1(mtd, off)) {
ret = afs_parse_v1_partition(mtd, off, &parts[i]);
if (ret)
goto out_free_parts;
i++;
}
if (afs_is_v2(mtd, off)) {
ret = afs_parse_v2_partition(mtd, off, &parts[i]);
if (ret)
goto out_free_parts;
i++;
}
}
*pparts = parts;
return i;
out_free_parts:
while (--i >= 0)
kfree(parts[i].name);
kfree(parts);
*pparts = NULL;
return ret;
}
static const struct of_device_id mtd_parser_afs_of_match_table[] = {
{ .compatible = "arm,arm-firmware-suite" },
{},
};
MODULE_DEVICE_TABLE(of, mtd_parser_afs_of_match_table);
static struct mtd_part_parser afs_parser = {
.parse_fn = parse_afs_partitions,
.name = "afs",
.of_match_table = mtd_parser_afs_of_match_table,
};
module_mtd_part_parser(afs_parser);
MODULE_AUTHOR("ARM Ltd");
MODULE_DESCRIPTION("ARM Firmware Suite partition parser");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/parsers/afs.c |
/*
* sharpslpart.c - MTD partition parser for NAND flash using the SHARP FTL
* for logical addressing, as used on the PXA models of the SHARP SL Series.
*
* Copyright (C) 2017 Andrea Adami <[email protected]>
*
* Based on SHARP GPL 2.4 sources:
* http://support.ezaurus.com/developer/source/source_dl.asp
* drivers/mtd/nand/sharp_sl_logical.c
* linux/include/asm-arm/sharp_nand_logical.h
*
* Copyright (C) 2002 SHARP
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sizes.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
/* oob structure */
#define NAND_NOOB_LOGADDR_00 8
#define NAND_NOOB_LOGADDR_01 9
#define NAND_NOOB_LOGADDR_10 10
#define NAND_NOOB_LOGADDR_11 11
#define NAND_NOOB_LOGADDR_20 12
#define NAND_NOOB_LOGADDR_21 13
#define BLOCK_IS_RESERVED 0xffff
#define BLOCK_UNMASK_COMPLEMENT 1
/* factory defaults */
#define SHARPSL_NAND_PARTS 3
#define SHARPSL_FTL_PART_SIZE (7 * SZ_1M)
#define SHARPSL_PARTINFO1_LADDR 0x00060000
#define SHARPSL_PARTINFO2_LADDR 0x00064000
#define BOOT_MAGIC 0x424f4f54
#define FSRO_MAGIC 0x4653524f
#define FSRW_MAGIC 0x46535257
/**
* struct sharpsl_ftl - Sharp FTL Logical Table
* @logmax: number of logical blocks
* @log2phy: the logical-to-physical table
*
* Structure containing the logical-to-physical translation table
* used by the SHARP SL FTL.
*/
struct sharpsl_ftl {
unsigned int logmax;
unsigned int *log2phy;
};
/* verify that the OOB bytes 8 to 15 are free and available for the FTL */
static int sharpsl_nand_check_ooblayout(struct mtd_info *mtd)
{
u8 freebytes = 0;
int section = 0;
while (true) {
struct mtd_oob_region oobfree = { };
int ret, i;
ret = mtd_ooblayout_free(mtd, section++, &oobfree);
if (ret)
break;
if (!oobfree.length || oobfree.offset > 15 ||
(oobfree.offset + oobfree.length) < 8)
continue;
i = oobfree.offset >= 8 ? oobfree.offset : 8;
for (; i < oobfree.offset + oobfree.length && i < 16; i++)
freebytes |= BIT(i - 8);
if (freebytes == 0xff)
return 0;
}
return -ENOTSUPP;
}
static int sharpsl_nand_read_oob(struct mtd_info *mtd, loff_t offs, u8 *buf)
{
struct mtd_oob_ops ops = { };
int ret;
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooblen = mtd->oobsize;
ops.oobbuf = buf;
ret = mtd_read_oob(mtd, offs, &ops);
if (ret != 0 || mtd->oobsize != ops.oobretlen)
return -1;
return 0;
}
/*
* The logical block number assigned to a physical block is stored in the OOB
* of the first page, in 3 16-bit copies with the following layout:
*
* 01234567 89abcdef
* -------- --------
* ECC BB xyxyxy
*
* When reading we check that the first two copies agree.
* In case of error, matching is tried using the following pairs.
* Reserved values 0xffff mean the block is kept for wear leveling.
*
* 01234567 89abcdef
* -------- --------
* ECC BB xyxy oob[8]==oob[10] && oob[9]==oob[11] -> byte0=8 byte1=9
* ECC BB xyxy oob[10]==oob[12] && oob[11]==oob[13] -> byte0=10 byte1=11
* ECC BB xy xy oob[12]==oob[8] && oob[13]==oob[9] -> byte0=12 byte1=13
*/
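/*
 * Illustrative decode: oob[8..13] = 06 00 06 00 06 00 gives
 * us = 0x0006; hweight16(us) = 2 (even parity, valid), so the
 * logical block number is (us >> 1) & 0x3ff = 3.
 */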
static int sharpsl_nand_get_logical_num(u8 *oob)
{
u16 us;
int good0, good1;
if (oob[NAND_NOOB_LOGADDR_00] == oob[NAND_NOOB_LOGADDR_10] &&
oob[NAND_NOOB_LOGADDR_01] == oob[NAND_NOOB_LOGADDR_11]) {
good0 = NAND_NOOB_LOGADDR_00;
good1 = NAND_NOOB_LOGADDR_01;
} else if (oob[NAND_NOOB_LOGADDR_10] == oob[NAND_NOOB_LOGADDR_20] &&
oob[NAND_NOOB_LOGADDR_11] == oob[NAND_NOOB_LOGADDR_21]) {
good0 = NAND_NOOB_LOGADDR_10;
good1 = NAND_NOOB_LOGADDR_11;
} else if (oob[NAND_NOOB_LOGADDR_20] == oob[NAND_NOOB_LOGADDR_00] &&
oob[NAND_NOOB_LOGADDR_21] == oob[NAND_NOOB_LOGADDR_01]) {
good0 = NAND_NOOB_LOGADDR_20;
good1 = NAND_NOOB_LOGADDR_21;
} else {
return -EINVAL;
}
us = oob[good0] | oob[good1] << 8;
/* parity check */
if (hweight16(us) & BLOCK_UNMASK_COMPLEMENT)
return -EINVAL;
/* reserved */
if (us == BLOCK_IS_RESERVED)
return BLOCK_IS_RESERVED;
return (us >> 1) & GENMASK(9, 0);
}
static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
{
unsigned int block_num, phymax;
int i, ret, log_num;
loff_t block_adr;
u8 *oob;
oob = kzalloc(mtd->oobsize, GFP_KERNEL);
if (!oob)
return -ENOMEM;
phymax = mtd_div_by_eb(SHARPSL_FTL_PART_SIZE, mtd);
/* FTL reserves 5% of the blocks + 1 spare */
ftl->logmax = ((phymax * 95) / 100) - 1;
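/*
 * e.g. with 16 KiB erase blocks (illustrative): phymax = 7 MiB /
 * 16 KiB = 448, so logmax = 448 * 95 / 100 - 1 = 424.
 */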
ftl->log2phy = kmalloc_array(ftl->logmax, sizeof(*ftl->log2phy),
GFP_KERNEL);
if (!ftl->log2phy) {
ret = -ENOMEM;
goto exit;
}
/* initialize ftl->log2phy */
for (i = 0; i < ftl->logmax; i++)
ftl->log2phy[i] = UINT_MAX;
/* create physical-logical table */
for (block_num = 0; block_num < phymax; block_num++) {
block_adr = (loff_t)block_num * mtd->erasesize;
if (mtd_block_isbad(mtd, block_adr))
continue;
if (sharpsl_nand_read_oob(mtd, block_adr, oob))
continue;
/* get logical block */
log_num = sharpsl_nand_get_logical_num(oob);
/* cut-off errors and skip the out-of-range values */
if (log_num > 0 && log_num < ftl->logmax) {
if (ftl->log2phy[log_num] == UINT_MAX)
ftl->log2phy[log_num] = block_num;
}
}
pr_info("Sharp SL FTL: %d blocks used (%d logical, %d reserved)\n",
phymax, ftl->logmax, phymax - ftl->logmax);
ret = 0;
exit:
kfree(oob);
return ret;
}
static void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
{
kfree(ftl->log2phy);
}
static int sharpsl_nand_read_laddr(struct mtd_info *mtd,
loff_t from,
size_t len,
void *buf,
struct sharpsl_ftl *ftl)
{
unsigned int log_num, final_log_num;
unsigned int block_num;
loff_t block_adr;
loff_t block_ofs;
size_t retlen;
int err;
log_num = mtd_div_by_eb((u32)from, mtd);
final_log_num = mtd_div_by_eb(((u32)from + len - 1), mtd);
if (len <= 0 || log_num >= ftl->logmax || final_log_num > log_num)
return -EINVAL;
block_num = ftl->log2phy[log_num];
block_adr = (loff_t)block_num * mtd->erasesize;
block_ofs = mtd_mod_by_eb((u32)from, mtd);
err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf);
/* Ignore corrected ECC errors */
if (mtd_is_bitflip(err))
err = 0;
if (!err && retlen != len)
err = -EIO;
if (err)
pr_err("sharpslpart: error, read failed at %#llx\n",
block_adr + block_ofs);
return err;
}
/*
* MTD Partition Parser
*
* Sample values read from SL-C860
*
* # cat /proc/mtd
* dev: size erasesize name
* mtd0: 006d0000 00020000 "Filesystem"
* mtd1: 00700000 00004000 "smf"
* mtd2: 03500000 00004000 "root"
* mtd3: 04400000 00004000 "home"
*
* PARTITIONINFO1
* 0x00060000: 00 00 00 00 00 00 70 00 42 4f 4f 54 00 00 00 00 ......p.BOOT....
* 0x00060010: 00 00 70 00 00 00 c0 03 46 53 52 4f 00 00 00 00 ..p.....FSRO....
* 0x00060020: 00 00 c0 03 00 00 00 04 46 53 52 57 00 00 00 00 ........FSRW....
*/
struct sharpsl_nand_partinfo {
__le32 start;
__le32 end;
__be32 magic;
u32 reserved;
};
static int sharpsl_nand_read_partinfo(struct mtd_info *master,
loff_t from,
size_t len,
struct sharpsl_nand_partinfo *buf,
struct sharpsl_ftl *ftl)
{
int ret;
ret = sharpsl_nand_read_laddr(master, from, len, buf, ftl);
if (ret)
return ret;
/* check for magics */
if (be32_to_cpu(buf[0].magic) != BOOT_MAGIC ||
be32_to_cpu(buf[1].magic) != FSRO_MAGIC ||
be32_to_cpu(buf[2].magic) != FSRW_MAGIC) {
pr_err("sharpslpart: magic values mismatch\n");
return -EINVAL;
}
/* fixup for hardcoded value 64 MiB (for older models) */
buf[2].end = cpu_to_le32(master->size);
/* extra sanity check */
if (le32_to_cpu(buf[0].end) <= le32_to_cpu(buf[0].start) ||
le32_to_cpu(buf[1].start) < le32_to_cpu(buf[0].end) ||
le32_to_cpu(buf[1].end) <= le32_to_cpu(buf[1].start) ||
le32_to_cpu(buf[2].start) < le32_to_cpu(buf[1].end) ||
le32_to_cpu(buf[2].end) <= le32_to_cpu(buf[2].start)) {
pr_err("sharpslpart: partition sizes mismatch\n");
return -EINVAL;
}
return 0;
}
static int sharpsl_parse_mtd_partitions(struct mtd_info *master,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
struct sharpsl_ftl ftl;
struct sharpsl_nand_partinfo buf[SHARPSL_NAND_PARTS];
struct mtd_partition *sharpsl_nand_parts;
int err;
/* check that OOB bytes 8 to 15 used by the FTL are actually free */
err = sharpsl_nand_check_ooblayout(master);
if (err)
return err;
/* init logical mgmt (FTL) */
err = sharpsl_nand_init_ftl(master, &ftl);
if (err)
return err;
/* read and validate first partition table */
pr_info("sharpslpart: try reading first partition table\n");
err = sharpsl_nand_read_partinfo(master,
SHARPSL_PARTINFO1_LADDR,
sizeof(buf), buf, &ftl);
if (err) {
/* fallback: read second partition table */
pr_warn("sharpslpart: first partition table is invalid, retry using the second\n");
err = sharpsl_nand_read_partinfo(master,
SHARPSL_PARTINFO2_LADDR,
sizeof(buf), buf, &ftl);
}
/* cleanup logical mgmt (FTL) */
sharpsl_nand_cleanup_ftl(&ftl);
if (err) {
pr_err("sharpslpart: both partition tables are invalid\n");
return err;
}
sharpsl_nand_parts = kcalloc(SHARPSL_NAND_PARTS,
sizeof(*sharpsl_nand_parts),
GFP_KERNEL);
if (!sharpsl_nand_parts)
return -ENOMEM;
/* original names */
sharpsl_nand_parts[0].name = "smf";
sharpsl_nand_parts[0].offset = le32_to_cpu(buf[0].start);
sharpsl_nand_parts[0].size = le32_to_cpu(buf[0].end) -
le32_to_cpu(buf[0].start);
sharpsl_nand_parts[1].name = "root";
sharpsl_nand_parts[1].offset = le32_to_cpu(buf[1].start);
sharpsl_nand_parts[1].size = le32_to_cpu(buf[1].end) -
le32_to_cpu(buf[1].start);
sharpsl_nand_parts[2].name = "home";
sharpsl_nand_parts[2].offset = le32_to_cpu(buf[2].start);
sharpsl_nand_parts[2].size = le32_to_cpu(buf[2].end) -
le32_to_cpu(buf[2].start);
*pparts = sharpsl_nand_parts;
return SHARPSL_NAND_PARTS;
}
static struct mtd_part_parser sharpsl_mtd_parser = {
.parse_fn = sharpsl_parse_mtd_partitions,
.name = "sharpslpart",
};
module_mtd_part_parser(sharpsl_mtd_parser);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andrea Adami <[email protected]>");
MODULE_DESCRIPTION("MTD partitioning for NAND flash on Sharp SL Series");
| linux-master | drivers/mtd/parsers/sharpslpart.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2022 Rafał Miłecki <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/slab.h>
#define TPLINK_SAFELOADER_DATA_OFFSET 4
#define TPLINK_SAFELOADER_MAX_PARTS 32
struct safeloader_cmn_header {
__be32 size;
uint32_t unused;
} __packed;
static void *mtd_parser_tplink_safeloader_read_table(struct mtd_info *mtd)
{
struct safeloader_cmn_header hdr;
struct device_node *np;
size_t bytes_read;
size_t size;
u32 offset;
char *buf;
int err;
np = mtd_get_of_node(mtd);
if (mtd_is_partition(mtd))
of_node_get(np);
else
np = of_get_child_by_name(np, "partitions");
if (of_property_read_u32(np, "partitions-table-offset", &offset)) {
pr_err("Failed to get partitions table offset\n");
goto err_put;
}
err = mtd_read(mtd, offset, sizeof(hdr), &bytes_read, (uint8_t *)&hdr);
if (err && !mtd_is_bitflip(err)) {
pr_err("Failed to read from %s at 0x%x\n", mtd->name, offset);
goto err_put;
}
size = be32_to_cpu(hdr.size);
buf = kmalloc(size + 1, GFP_KERNEL);
if (!buf)
goto err_put;
err = mtd_read(mtd, offset + sizeof(hdr), size, &bytes_read, buf);
if (err && !mtd_is_bitflip(err)) {
pr_err("Failed to read from %s at 0x%zx\n", mtd->name, offset + sizeof(hdr));
goto err_kfree;
}
buf[size] = '\0';
of_node_put(np);
return buf;
err_kfree:
kfree(buf);
err_put:
of_node_put(np);
return NULL;
}
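/*
 * The table read above is plain ASCII; each entry looks like
 * (illustrative values):
 *
 * partition os-image base 0x40000 size 0x300000
 *
 * which matches the sscanf() format used by the parser below.
 */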
static int mtd_parser_tplink_safeloader_parse(struct mtd_info *mtd,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
struct mtd_partition *parts;
char name[65];
size_t offset;
size_t bytes;
char *buf;
int idx;
int err;
parts = kcalloc(TPLINK_SAFELOADER_MAX_PARTS, sizeof(*parts), GFP_KERNEL);
if (!parts) {
err = -ENOMEM;
goto err_out;
}
buf = mtd_parser_tplink_safeloader_read_table(mtd);
if (!buf) {
err = -ENOENT;
goto err_free_parts;
}
for (idx = 0, offset = TPLINK_SAFELOADER_DATA_OFFSET;
idx < TPLINK_SAFELOADER_MAX_PARTS &&
sscanf(buf + offset, "partition %64s base 0x%llx size 0x%llx%zn\n",
name, &parts[idx].offset, &parts[idx].size, &bytes) == 3;
idx++, offset += bytes + 1) {
parts[idx].name = kstrdup(name, GFP_KERNEL);
if (!parts[idx].name) {
err = -ENOMEM;
goto err_free;
}
}
if (idx == TPLINK_SAFELOADER_MAX_PARTS)
pr_warn("Reached maximum number of partitions!\n");
kfree(buf);
*pparts = parts;
return idx;
err_free:
for (idx -= 1; idx >= 0; idx--)
kfree(parts[idx].name);
err_free_parts:
kfree(parts);
err_out:
return err;
};
static void mtd_parser_tplink_safeloader_cleanup(const struct mtd_partition *pparts,
int nr_parts)
{
int i;
for (i = 0; i < nr_parts; i++)
kfree(pparts[i].name);
kfree(pparts);
}
static const struct of_device_id mtd_parser_tplink_safeloader_of_match_table[] = {
{ .compatible = "tplink,safeloader-partitions" },
{},
};
MODULE_DEVICE_TABLE(of, mtd_parser_tplink_safeloader_of_match_table);
static struct mtd_part_parser mtd_parser_tplink_safeloader = {
.parse_fn = mtd_parser_tplink_safeloader_parse,
.cleanup = mtd_parser_tplink_safeloader_cleanup,
.name = "tplink-safeloader",
.of_match_table = mtd_parser_tplink_safeloader_of_match_table,
};
module_mtd_part_parser(mtd_parser_tplink_safeloader);
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/parsers/tplink_safeloader.c |
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
// Author: Vignesh Raghavendra <[email protected]>
#include <linux/completion.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/hyperbus.h>
#include <linux/mtd/mtd.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/sched/task_stack.h>
#include <linux/types.h>
#define AM654_HBMC_CALIB_COUNT 25
struct am654_hbmc_device_priv {
struct completion rx_dma_complete;
phys_addr_t device_base;
struct hyperbus_ctlr *ctlr;
struct dma_chan *rx_chan;
};
struct am654_hbmc_priv {
struct hyperbus_ctlr ctlr;
struct hyperbus_device hbdev;
struct mux_control *mux_ctrl;
};
static int am654_hbmc_calibrate(struct hyperbus_device *hbdev)
{
struct map_info *map = &hbdev->map;
struct cfi_private cfi;
int count = AM654_HBMC_CALIB_COUNT;
int pass_count = 0;
int ret;
cfi.interleave = 1;
cfi.device_type = CFI_DEVICETYPE_X16;
cfi_send_gen_cmd(0xF0, 0, 0, map, &cfi, cfi.device_type, NULL);
cfi_send_gen_cmd(0x98, 0x55, 0, map, &cfi, cfi.device_type, NULL);
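/*
 * Poll the CFI query response up to AM654_HBMC_CALIB_COUNT times;
 * five consecutive successful reads are taken as proof that the
 * interface timing is calibrated.
 */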
while (count--) {
ret = cfi_qry_present(map, 0, &cfi);
if (ret)
pass_count++;
else
pass_count = 0;
if (pass_count == 5)
break;
}
cfi_qry_mode_off(0, map, &cfi);
return ret;
}
static void am654_hbmc_dma_callback(void *param)
{
struct am654_hbmc_device_priv *priv = param;
complete(&priv->rx_dma_complete);
}
static int am654_hbmc_dma_read(struct am654_hbmc_device_priv *priv, void *to,
unsigned long from, ssize_t len)
{
enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
struct dma_chan *rx_chan = priv->rx_chan;
struct dma_async_tx_descriptor *tx;
dma_addr_t dma_dst, dma_src;
dma_cookie_t cookie;
int ret;
if (!priv->rx_chan || !virt_addr_valid(to) || object_is_on_stack(to))
return -EINVAL;
dma_dst = dma_map_single(rx_chan->device->dev, to, len, DMA_FROM_DEVICE);
if (dma_mapping_error(rx_chan->device->dev, dma_dst)) {
dev_dbg(priv->ctlr->dev, "DMA mapping failed\n");
return -EIO;
}
dma_src = priv->device_base + from;
tx = dmaengine_prep_dma_memcpy(rx_chan, dma_dst, dma_src, len, flags);
if (!tx) {
dev_err(priv->ctlr->dev, "device_prep_dma_memcpy error\n");
ret = -EIO;
goto unmap_dma;
}
reinit_completion(&priv->rx_dma_complete);
tx->callback = am654_hbmc_dma_callback;
tx->callback_param = priv;
cookie = dmaengine_submit(tx);
ret = dma_submit_error(cookie);
if (ret) {
dev_err(priv->ctlr->dev, "dma_submit_error %d\n", cookie);
goto unmap_dma;
}
dma_async_issue_pending(rx_chan);
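/* Timeout heuristic: roughly 1 ms per byte of transfer plus a 1 s margin */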
if (!wait_for_completion_timeout(&priv->rx_dma_complete, msecs_to_jiffies(len + 1000))) {
dmaengine_terminate_sync(rx_chan);
dev_err(priv->ctlr->dev, "DMA wait_for_completion_timeout\n");
ret = -ETIMEDOUT;
}
unmap_dma:
dma_unmap_single(rx_chan->device->dev, dma_dst, len, DMA_FROM_DEVICE);
return ret;
}
static void am654_hbmc_read(struct hyperbus_device *hbdev, void *to,
unsigned long from, ssize_t len)
{
struct am654_hbmc_device_priv *priv = hbdev->priv;
if (len < SZ_1K || am654_hbmc_dma_read(priv, to, from, len))
memcpy_fromio(to, hbdev->map.virt + from, len);
}
static const struct hyperbus_ops am654_hbmc_ops = {
.calibrate = am654_hbmc_calibrate,
.copy_from = am654_hbmc_read,
};
static int am654_hbmc_request_mmap_dma(struct am654_hbmc_device_priv *priv)
{
struct dma_chan *rx_chan;
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
rx_chan = dma_request_chan_by_mask(&mask);
if (IS_ERR(rx_chan)) {
if (PTR_ERR(rx_chan) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_dbg(priv->ctlr->dev, "No DMA channel available\n");
return 0;
}
priv->rx_chan = rx_chan;
init_completion(&priv->rx_dma_complete);
return 0;
}
static int am654_hbmc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct am654_hbmc_device_priv *dev_priv;
struct device *dev = &pdev->dev;
struct am654_hbmc_priv *priv;
struct resource res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
platform_set_drvdata(pdev, priv);
priv->hbdev.np = of_get_next_child(np, NULL);
ret = of_address_to_resource(priv->hbdev.np, 0, &res);
if (ret)
return ret;
if (of_property_read_bool(dev->of_node, "mux-controls")) {
struct mux_control *control = devm_mux_control_get(dev, NULL);
if (IS_ERR(control))
return PTR_ERR(control);
ret = mux_control_select(control, 1);
if (ret) {
dev_err(dev, "Failed to select HBMC mux\n");
return ret;
}
priv->mux_ctrl = control;
}
priv->hbdev.map.size = resource_size(&res);
priv->hbdev.map.virt = devm_ioremap_resource(dev, &res);
if (IS_ERR(priv->hbdev.map.virt))
return PTR_ERR(priv->hbdev.map.virt);
priv->ctlr.dev = dev;
priv->ctlr.ops = &am654_hbmc_ops;
priv->hbdev.ctlr = &priv->ctlr;
dev_priv = devm_kzalloc(dev, sizeof(*dev_priv), GFP_KERNEL);
if (!dev_priv) {
ret = -ENOMEM;
goto disable_mux;
}
priv->hbdev.priv = dev_priv;
dev_priv->device_base = res.start;
dev_priv->ctlr = &priv->ctlr;
ret = am654_hbmc_request_mmap_dma(dev_priv);
if (ret)
goto disable_mux;
ret = hyperbus_register_device(&priv->hbdev);
if (ret) {
dev_err(dev, "failed to register controller\n");
goto release_dma;
}
return 0;
release_dma:
if (dev_priv->rx_chan)
dma_release_channel(dev_priv->rx_chan);
disable_mux:
if (priv->mux_ctrl)
mux_control_deselect(priv->mux_ctrl);
return ret;
}
static int am654_hbmc_remove(struct platform_device *pdev)
{
struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
struct am654_hbmc_device_priv *dev_priv = priv->hbdev.priv;
hyperbus_unregister_device(&priv->hbdev);
if (priv->mux_ctrl)
mux_control_deselect(priv->mux_ctrl);
if (dev_priv->rx_chan)
dma_release_channel(dev_priv->rx_chan);
return 0;
}
static const struct of_device_id am654_hbmc_dt_ids[] = {
{
.compatible = "ti,am654-hbmc",
},
{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, am654_hbmc_dt_ids);
static struct platform_driver am654_hbmc_platform_driver = {
.probe = am654_hbmc_probe,
.remove = am654_hbmc_remove,
.driver = {
.name = "hbmc-am654",
.of_match_table = am654_hbmc_dt_ids,
},
};
module_platform_driver(am654_hbmc_platform_driver);
MODULE_DESCRIPTION("HBMC driver for AM654 SoC");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:hbmc-am654");
MODULE_AUTHOR("Vignesh Raghavendra <[email protected]>");
| linux-master | drivers/mtd/hyperbus/hbmc-am654.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Linux driver for RPC-IF HyperFlash
*
* Copyright (C) 2019-2020 Cogent Embedded, Inc.
*/
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/hyperbus.h>
#include <linux/mtd/mtd.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <memory/renesas-rpc-if.h>
struct rpcif_hyperbus {
struct rpcif rpc;
struct hyperbus_ctlr ctlr;
struct hyperbus_device hbdev;
};
static const struct rpcif_op rpcif_op_tmpl = {
.cmd = {
.buswidth = 8,
.ddr = true,
},
.ocmd = {
.buswidth = 8,
.ddr = true,
},
.addr = {
.nbytes = 1,
.buswidth = 8,
.ddr = true,
},
.data = {
.buswidth = 8,
.ddr = true,
},
};
static void rpcif_hb_prepare_read(struct rpcif *rpc, void *to,
unsigned long from, ssize_t len)
{
struct rpcif_op op = rpcif_op_tmpl;
op.cmd.opcode = HYPERBUS_RW_READ | HYPERBUS_AS_MEM;
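/* HyperBus devices are 16-bit word addressed, so halve the byte address */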
op.addr.val = from >> 1;
op.dummy.buswidth = 1;
op.dummy.ncycles = 15;
op.data.dir = RPCIF_DATA_IN;
op.data.nbytes = len;
op.data.buf.in = to;
rpcif_prepare(rpc->dev, &op, NULL, NULL);
}
static void rpcif_hb_prepare_write(struct rpcif *rpc, unsigned long to,
void *from, ssize_t len)
{
struct rpcif_op op = rpcif_op_tmpl;
op.cmd.opcode = HYPERBUS_RW_WRITE | HYPERBUS_AS_MEM;
op.addr.val = to >> 1;
op.data.dir = RPCIF_DATA_OUT;
op.data.nbytes = len;
op.data.buf.out = from;
rpcif_prepare(rpc->dev, &op, NULL, NULL);
}
static u16 rpcif_hb_read16(struct hyperbus_device *hbdev, unsigned long addr)
{
struct rpcif_hyperbus *hyperbus =
container_of(hbdev, struct rpcif_hyperbus, hbdev);
map_word data;
rpcif_hb_prepare_read(&hyperbus->rpc, &data, addr, 2);
rpcif_manual_xfer(hyperbus->rpc.dev);
return data.x[0];
}
static void rpcif_hb_write16(struct hyperbus_device *hbdev, unsigned long addr,
u16 data)
{
struct rpcif_hyperbus *hyperbus =
container_of(hbdev, struct rpcif_hyperbus, hbdev);
rpcif_hb_prepare_write(&hyperbus->rpc, addr, &data, 2);
rpcif_manual_xfer(hyperbus->rpc.dev);
}
static void rpcif_hb_copy_from(struct hyperbus_device *hbdev, void *to,
unsigned long from, ssize_t len)
{
struct rpcif_hyperbus *hyperbus =
container_of(hbdev, struct rpcif_hyperbus, hbdev);
rpcif_hb_prepare_read(&hyperbus->rpc, to, from, len);
rpcif_dirmap_read(hyperbus->rpc.dev, from, len, to);
}
static const struct hyperbus_ops rpcif_hb_ops = {
.read16 = rpcif_hb_read16,
.write16 = rpcif_hb_write16,
.copy_from = rpcif_hb_copy_from,
};
static int rpcif_hb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rpcif_hyperbus *hyperbus;
int error;
hyperbus = devm_kzalloc(dev, sizeof(*hyperbus), GFP_KERNEL);
if (!hyperbus)
return -ENOMEM;
error = rpcif_sw_init(&hyperbus->rpc, pdev->dev.parent);
if (error)
return error;
platform_set_drvdata(pdev, hyperbus);
pm_runtime_enable(hyperbus->rpc.dev);
error = rpcif_hw_init(hyperbus->rpc.dev, true);
if (error)
goto out_disable_rpm;
hyperbus->hbdev.map.size = hyperbus->rpc.size;
hyperbus->hbdev.map.virt = hyperbus->rpc.dirmap;
hyperbus->ctlr.dev = dev;
hyperbus->ctlr.ops = &rpcif_hb_ops;
hyperbus->hbdev.ctlr = &hyperbus->ctlr;
hyperbus->hbdev.np = of_get_next_child(pdev->dev.parent->of_node, NULL);
error = hyperbus_register_device(&hyperbus->hbdev);
if (error)
goto out_disable_rpm;
return 0;
out_disable_rpm:
pm_runtime_disable(hyperbus->rpc.dev);
return error;
}
static int rpcif_hb_remove(struct platform_device *pdev)
{
struct rpcif_hyperbus *hyperbus = platform_get_drvdata(pdev);
hyperbus_unregister_device(&hyperbus->hbdev);
pm_runtime_disable(hyperbus->rpc.dev);
return 0;
}
static struct platform_driver rpcif_platform_driver = {
.probe = rpcif_hb_probe,
.remove = rpcif_hb_remove,
.driver = {
.name = "rpc-if-hyperflash",
},
};
module_platform_driver(rpcif_platform_driver);
MODULE_DESCRIPTION("Renesas RPC-IF HyperFlash driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mtd/hyperbus/rpc-if.c |
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
// Author: Vignesh Raghavendra <[email protected]>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/hyperbus.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/of.h>
#include <linux/types.h>
static struct hyperbus_device *map_to_hbdev(struct map_info *map)
{
return container_of(map, struct hyperbus_device, map);
}
static map_word hyperbus_read16(struct map_info *map, unsigned long addr)
{
struct hyperbus_device *hbdev = map_to_hbdev(map);
struct hyperbus_ctlr *ctlr = hbdev->ctlr;
map_word read_data;
read_data.x[0] = ctlr->ops->read16(hbdev, addr);
return read_data;
}
static void hyperbus_write16(struct map_info *map, map_word d,
unsigned long addr)
{
struct hyperbus_device *hbdev = map_to_hbdev(map);
struct hyperbus_ctlr *ctlr = hbdev->ctlr;
ctlr->ops->write16(hbdev, addr, d.x[0]);
}
static void hyperbus_copy_from(struct map_info *map, void *to,
unsigned long from, ssize_t len)
{
struct hyperbus_device *hbdev = map_to_hbdev(map);
struct hyperbus_ctlr *ctlr = hbdev->ctlr;
ctlr->ops->copy_from(hbdev, to, from, len);
}
static void hyperbus_copy_to(struct map_info *map, unsigned long to,
const void *from, ssize_t len)
{
struct hyperbus_device *hbdev = map_to_hbdev(map);
struct hyperbus_ctlr *ctlr = hbdev->ctlr;
ctlr->ops->copy_to(hbdev, to, from, len);
}
int hyperbus_register_device(struct hyperbus_device *hbdev)
{
const struct hyperbus_ops *ops;
struct hyperbus_ctlr *ctlr;
struct device_node *np;
struct map_info *map;
struct device *dev;
int ret;
if (!hbdev || !hbdev->np || !hbdev->ctlr || !hbdev->ctlr->dev) {
pr_err("hyperbus: please fill all the necessary fields!\n");
return -EINVAL;
}
np = hbdev->np;
ctlr = hbdev->ctlr;
if (!of_device_is_compatible(np, "cypress,hyperflash")) {
dev_err(ctlr->dev, "\"cypress,hyperflash\" compatible missing\n");
return -ENODEV;
}
hbdev->memtype = HYPERFLASH;
dev = ctlr->dev;
map = &hbdev->map;
map->name = dev_name(dev);
map->bankwidth = 2;
map->device_node = np;
simple_map_init(map);
ops = ctlr->ops;
if (ops) {
if (ops->read16)
map->read = hyperbus_read16;
if (ops->write16)
map->write = hyperbus_write16;
if (ops->copy_to)
map->copy_to = hyperbus_copy_to;
if (ops->copy_from)
map->copy_from = hyperbus_copy_from;
if (ops->calibrate && !ctlr->calibrated) {
ret = ops->calibrate(hbdev);
if (!ret) {
dev_err(dev, "Calibration failed\n");
return -ENODEV;
}
ctlr->calibrated = true;
}
}
hbdev->mtd = do_map_probe("cfi_probe", map);
if (!hbdev->mtd) {
dev_err(dev, "probing of hyperbus device failed\n");
return -ENODEV;
}
hbdev->mtd->dev.parent = dev;
mtd_set_of_node(hbdev->mtd, np);
ret = mtd_device_register(hbdev->mtd, NULL, 0);
if (ret) {
dev_err(dev, "failed to register mtd device\n");
map_destroy(hbdev->mtd);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(hyperbus_register_device);
void hyperbus_unregister_device(struct hyperbus_device *hbdev)
{
if (hbdev && hbdev->mtd) {
WARN_ON(mtd_device_unregister(hbdev->mtd));
map_destroy(hbdev->mtd);
}
}
EXPORT_SYMBOL_GPL(hyperbus_unregister_device);
MODULE_DESCRIPTION("HyperBus Framework");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vignesh Raghavendra <[email protected]>");
| linux-master | drivers/mtd/hyperbus/hyperbus-core.c |
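One subtlety in hyperbus_register_device() above deserves a worked example: ops->calibrate() must return non-zero on success, because the core's "if (!ret)" branch treats a zero return as a calibration failure. A hedged sketch of a conforming implementation follows; foo_timings_are_stable() is a hypothetical helper standing in for real controller tuning, and <linux/mtd/hyperbus.h> is assumed to be included.

/* Hypothetical helper: returns true once the controller timings are tuned. */
static bool foo_timings_are_stable(struct hyperbus_device *hbdev);

static int foo_calibrate(struct hyperbus_device *hbdev)
{
	/*
	 * Tune the controller timings, e.g. by re-reading a known
	 * pattern (such as the CFI query block) until it is stable.
	 */
	if (foo_timings_are_stable(hbdev))
		return 1;	/* non-zero: success, probing continues */
	return 0;	/* zero: hyperbus_register_device() fails with -ENODEV */
}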
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012 Linutronix GmbH
* Copyright (c) 2014 sigma star gmbh
* Author: Richard Weinberger <[email protected]>
*/
/**
* update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
* @wrk: the work description object
*/
static void update_fastmap_work_fn(struct work_struct *wrk)
{
struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
ubi_update_fastmap(ubi);
spin_lock(&ubi->wl_lock);
ubi->fm_work_scheduled = 0;
spin_unlock(&ubi->wl_lock);
}
/**
 * find_anchor_wl_entry - find a wear-leveling entry to be used as an anchor PEB.
 * @root: the RB-tree to search
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
struct rb_node *p;
struct ubi_wl_entry *e, *victim = NULL;
int max_ec = UBI_MAX_ERASECOUNTER;
ubi_rb_for_each_entry(p, e, root, u.rb) {
if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
victim = e;
max_ec = e->ec;
}
}
return victim;
}
static inline void return_unused_peb(struct ubi_device *ubi,
struct ubi_wl_entry *e)
{
wl_tree_add(e, &ubi->free);
ubi->free_count++;
}
/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
* @ubi: UBI device description object
* @pool: fastmap pool description object
*/
static void return_unused_pool_pebs(struct ubi_device *ubi,
struct ubi_fm_pool *pool)
{
int i;
struct ubi_wl_entry *e;
for (i = pool->used; i < pool->size; i++) {
e = ubi->lookuptbl[pool->pebs[i]];
return_unused_peb(ubi, e);
}
}
/**
 * ubi_wl_get_fm_peb - find a free physical eraseblock for fastmap use.
 * @ubi: UBI device description object
 * @anchor: if non-zero, the returned PEB will be used as an anchor PEB by fastmap
 *
 * The function returns a free physical eraseblock (when @anchor is set, one
 * whose PEB number is below UBI_FM_MAX_START) and removes it from the wl
 * sub-system. Must be called with wl_lock held!
*/
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
struct ubi_wl_entry *e = NULL;
if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
goto out;
if (anchor)
e = find_anchor_wl_entry(&ubi->free);
else
e = find_mean_wl_entry(ubi, &ubi->free);
if (!e)
goto out;
self_check_in_wl_tree(ubi, e, &ubi->free);
/* remove it from the free list,
 * the wl subsystem no longer knows this eraseblock */
rb_erase(&e->u.rb, &ubi->free);
ubi->free_count--;
out:
return e;
}
/*
 * has_enough_free_count - whether ubi has enough free pebs to fill fm pools
 * @ubi: UBI device description object
 * @is_wl_pool: whether UBI is filling the wear-leveling pool
 *
 * This helper function checks whether there are enough free pebs (with the
 * pebs reserved for fastmap deducted) to fill fm_pool and fm_wl_pool. The
 * fastmap deduction only kicks in once at least one free peb has been put
 * into fm_wl_pool. For the wear-leveling pool, UBI must additionally keep
 * free pebs reserved for bad peb handling, because otherwise there may not
 * be enough free pebs left for user volumes after new bad pebs are produced.
 */
static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
{
int fm_used = 0; /* fastmap non-anchor pebs */
int beb_rsvd_pebs;
if (!ubi->free.rb_node)
return false;
beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
fm_used = ubi->fm_size / ubi->leb_size - 1;
return ubi->free_count - beb_rsvd_pebs > fm_used;
}
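/*
 * Worked example for the check above (illustrative numbers, assuming
 * fastmap is enabled and fm_wl_pool already holds at least one PEB):
 * with free_count = 10, beb_rsvd_pebs = 2 and a fastmap spanning two
 * LEBs (fm_size / leb_size = 2, so fm_used = 1):
 *   fm_pool fill    (is_wl_pool == false): 10 - 0 > 1 -> true
 *   fm_wl_pool fill (is_wl_pool == true):  10 - 2 > 1 -> true
 * Once free_count drops to 3, the wl pool check becomes 3 - 2 > 1 ->
 * false, while the user pool check still passes (3 - 0 > 1 -> true).
 */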
/**
* ubi_refill_pools - refills all fastmap PEB pools.
* @ubi: UBI device description object
*/
void ubi_refill_pools(struct ubi_device *ubi)
{
struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
struct ubi_fm_pool *pool = &ubi->fm_pool;
struct ubi_wl_entry *e;
int enough;
spin_lock(&ubi->wl_lock);
return_unused_pool_pebs(ubi, wl_pool);
return_unused_pool_pebs(ubi, pool);
wl_pool->size = 0;
pool->size = 0;
if (ubi->fm_anchor) {
wl_tree_add(ubi->fm_anchor, &ubi->free);
ubi->free_count++;
ubi->fm_anchor = NULL;
}
if (!ubi->fm_disabled)
/*
* All available PEBs are in ubi->free, now is the time to get
* the best anchor PEBs.
*/
ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
for (;;) {
enough = 0;
if (pool->size < pool->max_size) {
if (!has_enough_free_count(ubi, false))
break;
e = wl_get_wle(ubi);
if (!e)
break;
pool->pebs[pool->size] = e->pnum;
pool->size++;
} else
enough++;
if (wl_pool->size < wl_pool->max_size) {
if (!has_enough_free_count(ubi, true))
break;
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
self_check_in_wl_tree(ubi, e, &ubi->free);
rb_erase(&e->u.rb, &ubi->free);
ubi->free_count--;
wl_pool->pebs[wl_pool->size] = e->pnum;
wl_pool->size++;
} else
enough++;
if (enough == 2)
break;
}
wl_pool->used = 0;
pool->used = 0;
spin_unlock(&ubi->wl_lock);
}
/**
* produce_free_peb - produce a free physical eraseblock.
* @ubi: UBI device description object
*
* This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
* disabled. Returns zero in case of success and a negative error code in case
* of failure.
*/
static int produce_free_peb(struct ubi_device *ubi)
{
int err;
while (!ubi->free.rb_node && ubi->works_count) {
dbg_wl("do one work synchronously");
err = do_work(ubi);
if (err)
return err;
}
return 0;
}
/**
* ubi_wl_get_peb - get a physical eraseblock.
* @ubi: UBI device description object
*
* This function returns a physical eraseblock in case of success and a
* negative error code in case of failure.
* Returns with ubi->fm_eba_sem held in read mode!
*/
int ubi_wl_get_peb(struct ubi_device *ubi)
{
int ret, attempts = 0;
struct ubi_fm_pool *pool = &ubi->fm_pool;
struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
again:
down_read(&ubi->fm_eba_sem);
spin_lock(&ubi->wl_lock);
/* We check here also for the WL pool because at this point we can
 * refill the WL pool synchronously. */
if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
spin_unlock(&ubi->wl_lock);
up_read(&ubi->fm_eba_sem);
ret = ubi_update_fastmap(ubi);
if (ret) {
ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
down_read(&ubi->fm_eba_sem);
return -ENOSPC;
}
down_read(&ubi->fm_eba_sem);
spin_lock(&ubi->wl_lock);
}
if (pool->used == pool->size) {
spin_unlock(&ubi->wl_lock);
attempts++;
if (attempts == 10) {
ubi_err(ubi, "Unable to get a free PEB from user WL pool");
ret = -ENOSPC;
goto out;
}
up_read(&ubi->fm_eba_sem);
ret = produce_free_peb(ubi);
if (ret < 0) {
down_read(&ubi->fm_eba_sem);
goto out;
}
goto again;
}
ubi_assert(pool->used < pool->size);
ret = pool->pebs[pool->used++];
prot_queue_add(ubi, ubi->lookuptbl[ret]);
spin_unlock(&ubi->wl_lock);
out:
return ret;
}
/**
* next_peb_for_wl - returns next PEB to be used internally by the
* WL sub-system.
*
* @ubi: UBI device description object
*/
static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
{
struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
int pnum;
if (pool->used == pool->size)
return NULL;
pnum = pool->pebs[pool->used];
return ubi->lookuptbl[pnum];
}
/**
* need_wear_leveling - checks whether to trigger a wear leveling work.
 * UBI fetches a free PEB from wl_pool; we check free PEBs from both 'wl_pool'
 * and 'ubi->free', because a free PEB in the 'ubi->free' tree may be moved
 * into 'wl_pool' by ubi_refill_pools().
*
* @ubi: UBI device description object
*/
static bool need_wear_leveling(struct ubi_device *ubi)
{
int ec;
struct ubi_wl_entry *e;
if (!ubi->used.rb_node)
return false;
e = next_peb_for_wl(ubi);
if (!e) {
if (!ubi->free.rb_node)
return false;
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
ec = e->ec;
} else {
ec = e->ec;
if (ubi->free.rb_node) {
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
ec = max(ec, e->ec);
}
}
e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
return ec - e->ec >= UBI_WL_THRESHOLD;
}
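/*
 * Worked example for need_wear_leveling() above (illustrative numbers,
 * assuming the default UBI_WL_THRESHOLD of 4096): if the next wl pool
 * PEB has ec = 1500, the best free-tree candidate has ec = 1700 and the
 * oldest used PEB has ec = 950, then ec = max(1500, 1700) = 1700 and
 * 1700 - 950 = 750 < 4096, so no wear-leveling work is triggered yet.
 */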
/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
*
* @ubi: UBI device description object
*/
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
int pnum;
ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
if (pool->used == pool->size) {
/* We cannot update the fastmap here because this
* function is called in atomic context.
* Let's fail here and refill/update it as soon as possible. */
if (!ubi->fm_work_scheduled) {
ubi->fm_work_scheduled = 1;
schedule_work(&ubi->fm_work);
}
return NULL;
}
pnum = pool->pebs[pool->used++];
return ubi->lookuptbl[pnum];
}
/**
* ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
* @ubi: UBI device description object
*/
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
struct ubi_work *wrk;
struct ubi_wl_entry *anchor;
spin_lock(&ubi->wl_lock);
/* Do we already have an anchor? */
if (ubi->fm_anchor) {
spin_unlock(&ubi->wl_lock);
return 0;
}
/* See if we can find an anchor PEB on the list of free PEBs */
anchor = ubi_wl_get_fm_peb(ubi, 1);
if (anchor) {
ubi->fm_anchor = anchor;
spin_unlock(&ubi->wl_lock);
return 0;
}
ubi->fm_do_produce_anchor = 1;
/* No luck, trigger wear leveling to produce a new anchor PEB. */
if (ubi->wl_scheduled) {
spin_unlock(&ubi->wl_lock);
return 0;
}
ubi->wl_scheduled = 1;
spin_unlock(&ubi->wl_lock);
wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
if (!wrk) {
spin_lock(&ubi->wl_lock);
ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
return -ENOMEM;
}
wrk->func = &wear_leveling_worker;
__schedule_ubi_work(ubi, wrk);
return 0;
}
/**
* ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
* sub-system.
* see: ubi_wl_put_peb()
*
* @ubi: UBI device description object
* @fm_e: physical eraseblock to return
* @lnum: the last used logical eraseblock number for the PEB
* @torture: if this physical eraseblock has to be tortured
*/
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
int lnum, int torture)
{
struct ubi_wl_entry *e;
int vol_id, pnum = fm_e->pnum;
dbg_wl("PEB %d", pnum);
ubi_assert(pnum >= 0);
ubi_assert(pnum < ubi->peb_count);
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
/* This can happen if we recovered from a fastmap the very
 * first time and are now writing a new one. In this case the wl
 * system has never seen any PEB used by the original fastmap.
*/
if (!e) {
e = fm_e;
ubi_assert(e->ec >= 0);
ubi->lookuptbl[pnum] = e;
}
spin_unlock(&ubi->wl_lock);
vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}
/**
* ubi_is_erase_work - checks whether a work is erase work.
* @wrk: The work object to be checked
*/
int ubi_is_erase_work(struct ubi_work *wrk)
{
return wrk->func == erase_worker;
}
static void ubi_fastmap_close(struct ubi_device *ubi)
{
int i;
return_unused_pool_pebs(ubi, &ubi->fm_pool);
return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
if (ubi->fm_anchor) {
return_unused_peb(ubi, ubi->fm_anchor);
ubi->fm_anchor = NULL;
}
if (ubi->fm) {
for (i = 0; i < ubi->fm->used_blocks; i++)
kfree(ubi->fm->e[i]);
}
kfree(ubi->fm);
}
/**
* may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
* See find_mean_wl_entry()
*
* @ubi: UBI device description object
* @e: physical eraseblock to return
* @root: RB tree to test against.
*/
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
struct ubi_wl_entry *e,
struct rb_root *root)
{
if (e && !ubi->fm_disabled && !ubi->fm &&
e->pnum < UBI_FM_MAX_START)
e = rb_entry(rb_next(root->rb_node),
struct ubi_wl_entry, u.rb);
return e;
}
| linux-master | drivers/mtd/ubi/fastmap-wl.c |
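The refill loop in ubi_refill_pools() above tops up both pools in lock step and stops once both are full (enough == 2) or free PEBs run out. The standalone userspace model below reproduces just that control flow with illustrative pool sizes; it is a sketch of the loop's arithmetic, not kernel code.

#include <stdio.h>

struct pool { int size, max_size; };

static int take_free_peb(int *free_count)
{
	if (*free_count <= 0)
		return 0;
	(*free_count)--;
	return 1;
}

int main(void)
{
	struct pool pool = { 0, 8 }, wl_pool = { 0, 4 };
	int free_count = 10;	/* free PEBs available for the pools */

	for (;;) {
		int enough = 0;

		if (pool.size < pool.max_size) {
			if (!take_free_peb(&free_count))
				break;
			pool.size++;
		} else {
			enough++;
		}
		if (wl_pool.size < wl_pool.max_size) {
			if (!take_free_peb(&free_count))
				break;
			wl_pool.size++;
		} else {
			enough++;
		}
		if (enough == 2)
			break;
	}
	/* prints "pool=6 wl_pool=4 free=0": the smaller wl pool fills first */
	printf("pool=%d wl_pool=%d free=%d\n",
	       pool.size, wl_pool.size, free_count);
	return 0;
}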
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 Ezequiel Garcia
* Copyright (c) 2011 Free Electrons
*
* Driver parameter handling strongly based on drivers/mtd/ubi/build.c
* Copyright (c) International Business Machines Corp., 2006
* Copyright (c) Nokia Corporation, 2007
* Authors: Artem Bityutskiy, Frank Haverkamp
*/
/*
* Read-only block devices on top of UBI volumes
*
* A simple implementation to allow a block device to be layered on top of a
* UBI volume. The implementation is provided by creating a static 1-to-1
* mapping between the block device and the UBI volume.
*
* The addressed byte is obtained from the addressed block sector, which is
* mapped linearly into the corresponding LEB:
*
* LEB number = addressed byte / LEB size
*
* This feature is compiled in the UBI core, and adds a 'block' parameter
* to allow early creation of block devices on top of UBI volumes. Runtime
* block creation/removal for UBI volumes is provided through two UBI ioctls:
* UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
*/
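/*
 * Worked example of the mapping above (illustrative geometry): assume a
 * usable LEB size of 126976 bytes (128 KiB PEB minus two 2 KiB pages for
 * the UBI headers). A request for sector 1000 addresses byte
 * 1000 << 9 = 512000, so:
 *   LEB    = 512000 / 126976 = 4
 *   offset = 512000 - 4 * 126976 = 4096
 * which is exactly the do_div() split performed in ubiblock_read() below.
 */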
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <linux/idr.h>
#include <asm/div64.h>
#include "ubi-media.h"
#include "ubi.h"
/* Maximum number of supported devices */
#define UBIBLOCK_MAX_DEVICES 32
/* Maximum length of the 'block=' parameter */
#define UBIBLOCK_PARAM_LEN 63
/* Maximum number of comma-separated items in the 'block=' parameter */
#define UBIBLOCK_PARAM_COUNT 2
struct ubiblock_param {
int ubi_num;
int vol_id;
char name[UBIBLOCK_PARAM_LEN+1];
};
struct ubiblock_pdu {
struct ubi_sgl usgl;
};
/* Number of elements set in the @ubiblock_param array */
static int ubiblock_devs __initdata;
/* ubiblock devices specification parameters */
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
struct ubiblock {
struct ubi_volume_desc *desc;
int ubi_num;
int vol_id;
int refcnt;
int leb_size;
struct gendisk *gd;
struct request_queue *rq;
struct mutex dev_mutex;
struct list_head list;
struct blk_mq_tag_set tag_set;
};
/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
static DEFINE_IDR(ubiblock_minor_idr);
/* Protects ubiblock_devices and ubiblock_minor_idr */
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;
static int __init ubiblock_set_param(const char *val,
const struct kernel_param *kp)
{
int i, ret;
size_t len;
struct ubiblock_param *param;
char buf[UBIBLOCK_PARAM_LEN];
char *pbuf = &buf[0];
char *tokens[UBIBLOCK_PARAM_COUNT];
if (!val)
return -EINVAL;
len = strnlen(val, UBIBLOCK_PARAM_LEN);
if (len == 0) {
pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
return 0;
}
if (len == UBIBLOCK_PARAM_LEN) {
pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
val, UBIBLOCK_PARAM_LEN);
return -EINVAL;
}
strcpy(buf, val);
/* Get rid of the final newline */
if (buf[len - 1] == '\n')
buf[len - 1] = '\0';
for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
tokens[i] = strsep(&pbuf, ",");
if (ubiblock_devs == UBIBLOCK_MAX_DEVICES) {
pr_err("UBI: block: too many 'block=' parameters, max. is %d\n",
UBIBLOCK_MAX_DEVICES);
return -EINVAL;
}
param = &ubiblock_param[ubiblock_devs];
if (tokens[1]) {
/* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
ret = kstrtoint(tokens[0], 10, &param->ubi_num);
if (ret < 0)
return -EINVAL;
/* Second param can be a number or a name */
ret = kstrtoint(tokens[1], 10, &param->vol_id);
if (ret < 0) {
param->vol_id = -1;
strcpy(param->name, tokens[1]);
}
} else {
/* One parameter: must be device path */
strcpy(param->name, tokens[0]);
param->ubi_num = -1;
param->vol_id = -1;
}
ubiblock_devs++;
return 0;
}
static const struct kernel_param_ops ubiblock_param_ops = {
.set = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
"Multiple \"block\" parameters may be specified.\n"
"UBI volumes may be specified by their number, name, or path to the device node.\n"
"Examples\n"
"Using the UBI volume path:\n"
"ubi.block=/dev/ubi0_0\n"
"Using the UBI device, and the volume name:\n"
"ubi.block=0,rootfs\n"
"Using both UBI device number and UBI volume number:\n"
"ubi.block=0,0\n");
static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
{
struct ubiblock *dev;
list_for_each_entry(dev, &ubiblock_devices, list)
if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
return dev;
return NULL;
}
static blk_status_t ubiblock_read(struct request *req)
{
struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
struct ubiblock *dev = req->q->queuedata;
u64 pos = blk_rq_pos(req) << 9;
int to_read = blk_rq_bytes(req);
int bytes_left = to_read;
/* Get LEB:offset address to read from */
int offset = do_div(pos, dev->leb_size);
int leb = pos;
struct req_iterator iter;
struct bio_vec bvec;
int ret;
blk_mq_start_request(req);
/*
* It is safe to ignore the return value of blk_rq_map_sg() because
* the number of sg entries is limited to UBI_MAX_SG_COUNT
* and ubi_read_sg() will check that limit.
*/
ubi_sgl_init(&pdu->usgl);
blk_rq_map_sg(req->q, req, pdu->usgl.sg);
while (bytes_left) {
/*
* We can only read one LEB at a time. Therefore if the read
* length is larger than one LEB size, we split the operation.
*/
if (offset + to_read > dev->leb_size)
to_read = dev->leb_size - offset;
ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
if (ret < 0)
break;
bytes_left -= to_read;
to_read = bytes_left;
leb += 1;
offset = 0;
}
rq_for_each_segment(bvec, req, iter)
flush_dcache_page(bvec.bv_page);
blk_mq_end_request(req, errno_to_blk_status(ret));
return BLK_STS_OK;
}
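/*
 * Worked example of the split logic above (illustrative geometry, LEB
 * size 126976 bytes): an 8192-byte request for sector 1239 addresses byte
 * 634368, i.e. LEB 4, offset 126464. The first pass is truncated by the
 * "offset + to_read > dev->leb_size" check to 512 bytes (126976 - 126464),
 * and the remaining 7680 bytes are read from LEB 5 at offset 0.
 */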
static int ubiblock_open(struct gendisk *disk, blk_mode_t mode)
{
struct ubiblock *dev = disk->private_data;
int ret;
mutex_lock(&dev->dev_mutex);
if (dev->refcnt > 0) {
/*
* The volume is already open, just increase the reference
* counter.
*/
goto out_done;
}
/*
* We want users to be aware they should only mount us as read-only.
* It's just a paranoid check, as write requests will get rejected
* in any case.
*/
if (mode & BLK_OPEN_WRITE) {
ret = -EROFS;
goto out_unlock;
}
dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
if (IS_ERR(dev->desc)) {
dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
dev->ubi_num, dev->vol_id);
ret = PTR_ERR(dev->desc);
dev->desc = NULL;
goto out_unlock;
}
out_done:
dev->refcnt++;
mutex_unlock(&dev->dev_mutex);
return 0;
out_unlock:
mutex_unlock(&dev->dev_mutex);
return ret;
}
static void ubiblock_release(struct gendisk *gd)
{
struct ubiblock *dev = gd->private_data;
mutex_lock(&dev->dev_mutex);
dev->refcnt--;
if (dev->refcnt == 0) {
ubi_close_volume(dev->desc);
dev->desc = NULL;
}
mutex_unlock(&dev->dev_mutex);
}
static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
/* Some tools might require this information */
geo->heads = 1;
geo->cylinders = 1;
geo->sectors = get_capacity(bdev->bd_disk);
geo->start = 0;
return 0;
}
static const struct block_device_operations ubiblock_ops = {
.owner = THIS_MODULE,
.open = ubiblock_open,
.release = ubiblock_release,
.getgeo = ubiblock_getgeo,
};
static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
switch (req_op(bd->rq)) {
case REQ_OP_READ:
return ubiblock_read(bd->rq);
default:
return BLK_STS_IOERR;
}
}
static int ubiblock_init_request(struct blk_mq_tag_set *set,
struct request *req, unsigned int hctx_idx,
unsigned int numa_node)
{
struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
return 0;
}
static const struct blk_mq_ops ubiblock_mq_ops = {
.queue_rq = ubiblock_queue_rq,
.init_request = ubiblock_init_request,
};
static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
{
u64 size = vi->used_bytes >> 9;
if (vi->used_bytes % 512) {
if (vi->vol_type == UBI_DYNAMIC_VOLUME)
pr_warn("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
vi->used_bytes - (size << 9));
else
pr_info("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
vi->used_bytes - (size << 9));
}
if ((sector_t)size != size)
return -EFBIG;
*disk_capacity = size;
return 0;
}
int ubiblock_create(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
struct gendisk *gd;
u64 disk_capacity;
int ret;
ret = calc_disk_capacity(vi, &disk_capacity);
if (ret)
return ret;
/* Check that the volume isn't already handled */
mutex_lock(&devices_mutex);
if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
ret = -EEXIST;
goto out_unlock;
}
dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
if (!dev) {
ret = -ENOMEM;
goto out_unlock;
}
mutex_init(&dev->dev_mutex);
dev->ubi_num = vi->ubi_num;
dev->vol_id = vi->vol_id;
dev->leb_size = vi->usable_leb_size;
dev->tag_set.ops = &ubiblock_mq_ops;
dev->tag_set.queue_depth = 64;
dev->tag_set.numa_node = NUMA_NO_NODE;
dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
dev->tag_set.driver_data = dev;
dev->tag_set.nr_hw_queues = 1;
ret = blk_mq_alloc_tag_set(&dev->tag_set);
if (ret) {
pr_err("UBI: block: blk_mq_alloc_tag_set failed for ubi%d_%d\n",
dev->ubi_num, dev->vol_id);
goto out_free_dev;
}
/* Initialize the gendisk of this ubiblock device */
gd = blk_mq_alloc_disk(&dev->tag_set, dev);
if (IS_ERR(gd)) {
ret = PTR_ERR(gd);
goto out_free_tags;
}
gd->fops = &ubiblock_ops;
gd->major = ubiblock_major;
gd->minors = 1;
gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
if (gd->first_minor < 0) {
dev_err(disk_to_dev(gd),
"block: dynamic minor allocation failed");
ret = -ENODEV;
goto out_cleanup_disk;
}
gd->flags |= GENHD_FL_NO_PART;
gd->private_data = dev;
sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
set_capacity(gd, disk_capacity);
dev->gd = gd;
dev->rq = gd->queue;
blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
list_add_tail(&dev->list, &ubiblock_devices);
/* Must be the last step: anyone can call file ops from now on */
ret = device_add_disk(vi->dev, dev->gd, NULL);
if (ret)
goto out_remove_minor;
dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
dev->ubi_num, dev->vol_id, vi->name);
mutex_unlock(&devices_mutex);
return 0;
out_remove_minor:
list_del(&dev->list);
idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_cleanup_disk:
put_disk(dev->gd);
out_free_tags:
blk_mq_free_tag_set(&dev->tag_set);
out_free_dev:
kfree(dev);
out_unlock:
mutex_unlock(&devices_mutex);
return ret;
}
static void ubiblock_cleanup(struct ubiblock *dev)
{
/* Stop new requests from arriving */
del_gendisk(dev->gd);
/* Finally destroy the blk queue */
dev_info(disk_to_dev(dev->gd), "released");
put_disk(dev->gd);
blk_mq_free_tag_set(&dev->tag_set);
idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
}
int ubiblock_remove(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
int ret;
mutex_lock(&devices_mutex);
dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
if (!dev) {
ret = -ENODEV;
goto out_unlock;
}
/* Found a device, let's lock it so we can check if it's busy */
mutex_lock(&dev->dev_mutex);
if (dev->refcnt > 0) {
ret = -EBUSY;
goto out_unlock_dev;
}
/* Remove from device list */
list_del(&dev->list);
ubiblock_cleanup(dev);
mutex_unlock(&dev->dev_mutex);
mutex_unlock(&devices_mutex);
kfree(dev);
return 0;
out_unlock_dev:
mutex_unlock(&dev->dev_mutex);
out_unlock:
mutex_unlock(&devices_mutex);
return ret;
}
static int ubiblock_resize(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
u64 disk_capacity;
int ret;
/*
* Need to lock the device list until we stop using the device,
* otherwise the device struct might get released in
* 'ubiblock_remove()'.
*/
mutex_lock(&devices_mutex);
dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
if (!dev) {
mutex_unlock(&devices_mutex);
return -ENODEV;
}
ret = calc_disk_capacity(vi, &disk_capacity);
if (ret) {
mutex_unlock(&devices_mutex);
if (ret == -EFBIG) {
dev_warn(disk_to_dev(dev->gd),
"the volume is too big (%d LEBs), cannot resize",
vi->size);
}
return ret;
}
mutex_lock(&dev->dev_mutex);
if (get_capacity(dev->gd) != disk_capacity) {
set_capacity(dev->gd, disk_capacity);
dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
vi->used_bytes);
}
mutex_unlock(&dev->dev_mutex);
mutex_unlock(&devices_mutex);
return 0;
}
static int ubiblock_notify(struct notifier_block *nb,
unsigned long notification_type, void *ns_ptr)
{
struct ubi_notification *nt = ns_ptr;
switch (notification_type) {
case UBI_VOLUME_ADDED:
/*
* We want to enforce explicit block device creation for
* volumes, so when a volume is added we do nothing.
*/
break;
case UBI_VOLUME_REMOVED:
ubiblock_remove(&nt->vi);
break;
case UBI_VOLUME_RESIZED:
ubiblock_resize(&nt->vi);
break;
case UBI_VOLUME_UPDATED:
/*
* If the volume is static, a content update might mean the
* size (i.e. used_bytes) was also changed.
*/
if (nt->vi.vol_type == UBI_STATIC_VOLUME)
ubiblock_resize(&nt->vi);
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block ubiblock_notifier = {
.notifier_call = ubiblock_notify,
};
static struct ubi_volume_desc * __init
open_volume_desc(const char *name, int ubi_num, int vol_id)
{
if (ubi_num == -1)
/* No ubi num, name must be a vol device path */
return ubi_open_volume_path(name, UBI_READONLY);
else if (vol_id == -1)
/* No vol_id, must be vol_name */
return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
else
return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
}
static void __init ubiblock_create_from_param(void)
{
int i, ret = 0;
struct ubiblock_param *p;
struct ubi_volume_desc *desc;
struct ubi_volume_info vi;
/*
* If there is an error creating one of the ubiblocks, continue on to
* create the following ubiblocks. This helps in a circumstance where
* the kernel command-line specifies multiple block devices and some
* may be broken, but we still want the working ones to come up.
*/
for (i = 0; i < ubiblock_devs; i++) {
p = &ubiblock_param[i];
desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
if (IS_ERR(desc)) {
pr_err(
"UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
p->ubi_num, p->vol_id, PTR_ERR(desc));
continue;
}
ubi_get_volume_info(desc, &vi);
ubi_close_volume(desc);
ret = ubiblock_create(&vi);
if (ret) {
pr_err(
"UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
vi.name, p->ubi_num, p->vol_id, ret);
continue;
}
}
}
static void ubiblock_remove_all(void)
{
struct ubiblock *next;
struct ubiblock *dev;
mutex_lock(&devices_mutex);
list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
/* The module is being forcefully removed */
WARN_ON(dev->desc);
/* Remove from device list */
list_del(&dev->list);
ubiblock_cleanup(dev);
kfree(dev);
}
mutex_unlock(&devices_mutex);
}
int __init ubiblock_init(void)
{
int ret;
ubiblock_major = register_blkdev(0, "ubiblock");
if (ubiblock_major < 0)
return ubiblock_major;
/*
* Attach block devices from 'block=' module param.
* Even if one block device in the param list fails to come up,
* still allow the module to load and leave any others up.
*/
ubiblock_create_from_param();
/*
* Block devices are only created upon user requests, so we ignore
* existing volumes.
*/
ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
if (ret)
goto err_unreg;
return 0;
err_unreg:
unregister_blkdev(ubiblock_major, "ubiblock");
ubiblock_remove_all();
return ret;
}
void __exit ubiblock_exit(void)
{
ubi_unregister_volume_notifier(&ubiblock_notifier);
ubiblock_remove_all();
unregister_blkdev(ubiblock_major, "ubiblock");
}
| linux-master | drivers/mtd/ubi/block.c |
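block.c above mentions UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK for runtime creation and removal of ubiblock devices. The userspace sketch below shows how they are issued against a volume character device; it assumes the struct ubi_blkcreate_req request structure from <mtd/ubi-user.h> and trims error handling to the minimum.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/ubi-user.h>

int main(void)
{
	struct ubi_blkcreate_req req = { 0 };	/* currently all padding */
	int fd = open("/dev/ubi0_0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, UBI_IOCVOLCRBLK, &req))	/* creates /dev/ubiblock0_0 */
		perror("UBI_IOCVOLCRBLK");
	if (ioctl(fd, UBI_IOCVOLRMBLK))		/* removes it again */
		perror("UBI_IOCVOLRMBLK");
	close(fd);
	return 0;
}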
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) International Business Machines Corp., 2006
*
* Author: Artem Bityutskiy (Битюцкий Артём), Joern Engel
*/
/*
* This is a small driver which implements fake MTD devices on top of UBI
* volumes. This sounds strange, but it is in fact quite useful to make
* MTD-oriented software (including all the legacy software) work on top of
* UBI.
*
* Gluebi emulates MTD devices of "MTD_UBIVOLUME" type. Their minimal I/O unit
* size (@mtd->writesize) is equivalent to the UBI minimal I/O unit. The
* eraseblock size is equivalent to the logical eraseblock size of the volume.
*/
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mtd/ubi.h>
#include <linux/mtd/mtd.h>
#include "ubi-media.h"
#define err_msg(fmt, ...) \
pr_err("gluebi (pid %d): %s: " fmt "\n", \
current->pid, __func__, ##__VA_ARGS__)
/**
* struct gluebi_device - a gluebi device description data structure.
* @mtd: emulated MTD device description object
* @refcnt: gluebi device reference count
* @desc: UBI volume descriptor
* @ubi_num: UBI device number this gluebi device works on
* @vol_id: ID of UBI volume this gluebi device works on
* @list: link in a list of gluebi devices
*/
struct gluebi_device {
struct mtd_info mtd;
int refcnt;
struct ubi_volume_desc *desc;
int ubi_num;
int vol_id;
struct list_head list;
};
/* List of all gluebi devices */
static LIST_HEAD(gluebi_devices);
static DEFINE_MUTEX(devices_mutex);
/**
* find_gluebi_nolock - find a gluebi device.
* @ubi_num: UBI device number
* @vol_id: volume ID
*
 * This function searches for the gluebi device corresponding to UBI device
* @ubi_num and UBI volume @vol_id. Returns the gluebi device description
* object in case of success and %NULL in case of failure. The caller has to
* have the &devices_mutex locked.
*/
static struct gluebi_device *find_gluebi_nolock(int ubi_num, int vol_id)
{
struct gluebi_device *gluebi;
list_for_each_entry(gluebi, &gluebi_devices, list)
if (gluebi->ubi_num == ubi_num && gluebi->vol_id == vol_id)
return gluebi;
return NULL;
}
/**
* gluebi_get_device - get MTD device reference.
* @mtd: the MTD device description object
*
* This function is called every time the MTD device is being opened and
* implements the MTD get_device() operation. Returns zero in case of success
* and a negative error code in case of failure.
*/
static int gluebi_get_device(struct mtd_info *mtd)
{
struct gluebi_device *gluebi;
int ubi_mode = UBI_READONLY;
if (mtd->flags & MTD_WRITEABLE)
ubi_mode = UBI_READWRITE;
gluebi = container_of(mtd, struct gluebi_device, mtd);
mutex_lock(&devices_mutex);
if (gluebi->refcnt > 0) {
/*
* The MTD device is already referenced and this is just one
* more reference. MTD allows many users to open the same
 * volume simultaneously and does not distinguish between
 * readers/writers/exclusive/meta openers as UBI does. So we do
* not open the UBI volume again - just increase the reference
* counter and return.
*/
gluebi->refcnt += 1;
mutex_unlock(&devices_mutex);
return 0;
}
/*
* This is the first reference to this UBI volume via the MTD device
 * interface. Open the corresponding volume in the mode derived from the
 * MTD device flags (read-write unless the MTD device is read-only).
*/
gluebi->desc = ubi_open_volume(gluebi->ubi_num, gluebi->vol_id,
ubi_mode);
if (IS_ERR(gluebi->desc)) {
mutex_unlock(&devices_mutex);
return PTR_ERR(gluebi->desc);
}
gluebi->refcnt += 1;
mutex_unlock(&devices_mutex);
return 0;
}
/**
* gluebi_put_device - put MTD device reference.
* @mtd: the MTD device description object
*
* This function is called every time the MTD device is being put. Returns
* zero in case of success and a negative error code in case of failure.
*/
static void gluebi_put_device(struct mtd_info *mtd)
{
struct gluebi_device *gluebi;
gluebi = container_of(mtd, struct gluebi_device, mtd);
mutex_lock(&devices_mutex);
gluebi->refcnt -= 1;
if (gluebi->refcnt == 0)
ubi_close_volume(gluebi->desc);
mutex_unlock(&devices_mutex);
}
/**
* gluebi_read - read operation of emulated MTD devices.
* @mtd: MTD device description object
* @from: absolute offset from where to read
* @len: how many bytes to read
* @retlen: count of read bytes is returned here
* @buf: buffer to store the read data
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, unsigned char *buf)
{
int err = 0, lnum, offs, bytes_left;
struct gluebi_device *gluebi;
gluebi = container_of(mtd, struct gluebi_device, mtd);
lnum = div_u64_rem(from, mtd->erasesize, &offs);
bytes_left = len;
while (bytes_left) {
size_t to_read = mtd->erasesize - offs;
if (to_read > bytes_left)
to_read = bytes_left;
err = ubi_read(gluebi->desc, lnum, buf, offs, to_read);
if (err)
break;
lnum += 1;
offs = 0;
bytes_left -= to_read;
buf += to_read;
}
*retlen = len - bytes_left;
return err;
}
/**
* gluebi_write - write operation of emulated MTD devices.
* @mtd: MTD device description object
* @to: absolute offset where to write
* @len: how many bytes to write
* @retlen: count of written bytes is returned here
* @buf: buffer with data to write
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
int err = 0, lnum, offs, bytes_left;
struct gluebi_device *gluebi;
gluebi = container_of(mtd, struct gluebi_device, mtd);
lnum = div_u64_rem(to, mtd->erasesize, &offs);
if (len % mtd->writesize || offs % mtd->writesize)
return -EINVAL;
bytes_left = len;
while (bytes_left) {
size_t to_write = mtd->erasesize - offs;
if (to_write > bytes_left)
to_write = bytes_left;
err = ubi_leb_write(gluebi->desc, lnum, buf, offs, to_write);
if (err)
break;
lnum += 1;
offs = 0;
bytes_left -= to_write;
buf += to_write;
}
*retlen = len - bytes_left;
return err;
}
/**
* gluebi_erase - erase operation of emulated MTD devices.
* @mtd: the MTD device description object
* @instr: the erase operation description
*
 * This function synchronously erases the addressed eraseblocks. Returns zero
 * in case
* of success and a negative error code in case of failure.
*/
static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
{
int err, i, lnum, count;
struct gluebi_device *gluebi;
if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
return -EINVAL;
lnum = mtd_div_by_eb(instr->addr, mtd);
count = mtd_div_by_eb(instr->len, mtd);
gluebi = container_of(mtd, struct gluebi_device, mtd);
for (i = 0; i < count - 1; i++) {
err = ubi_leb_unmap(gluebi->desc, lnum + i);
if (err)
goto out_err;
}
/*
* MTD erase operations are synchronous, so we have to make sure the
* physical eraseblock is wiped out.
*
* Thus, perform leb_erase instead of leb_unmap operation - leb_erase
* will wait for the end of operations
*/
err = ubi_leb_erase(gluebi->desc, lnum + i);
if (err)
goto out_err;
return 0;
out_err:
instr->fail_addr = (long long)lnum * mtd->erasesize;
return err;
}
/**
* gluebi_create - create a gluebi device for an UBI volume.
* @di: UBI device description object
* @vi: UBI volume description object
*
* This function is called when a new UBI volume is created in order to create
* corresponding fake MTD device. Returns zero in case of success and a
* negative error code in case of failure.
*/
static int gluebi_create(struct ubi_device_info *di,
struct ubi_volume_info *vi)
{
struct gluebi_device *gluebi, *g;
struct mtd_info *mtd;
gluebi = kzalloc(sizeof(struct gluebi_device), GFP_KERNEL);
if (!gluebi)
return -ENOMEM;
mtd = &gluebi->mtd;
mtd->name = kmemdup(vi->name, vi->name_len + 1, GFP_KERNEL);
if (!mtd->name) {
kfree(gluebi);
return -ENOMEM;
}
gluebi->vol_id = vi->vol_id;
gluebi->ubi_num = vi->ubi_num;
mtd->type = MTD_UBIVOLUME;
if (!di->ro_mode)
mtd->flags = MTD_WRITEABLE;
mtd->owner = THIS_MODULE;
mtd->writesize = di->min_io_size;
mtd->erasesize = vi->usable_leb_size;
mtd->_read = gluebi_read;
mtd->_write = gluebi_write;
mtd->_erase = gluebi_erase;
mtd->_get_device = gluebi_get_device;
mtd->_put_device = gluebi_put_device;
/*
 * In case of a dynamic volume, the MTD device size is just the volume size. In
* case of a static volume the size is equivalent to the amount of data
* bytes.
*/
if (vi->vol_type == UBI_DYNAMIC_VOLUME)
mtd->size = (unsigned long long)vi->usable_leb_size * vi->size;
else
mtd->size = vi->used_bytes;
/* Just a sanity check - make sure this gluebi device does not exist */
mutex_lock(&devices_mutex);
g = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (g)
err_msg("gluebi MTD device %d from UBI device %d volume %d already exists",
g->mtd.index, vi->ubi_num, vi->vol_id);
mutex_unlock(&devices_mutex);
if (mtd_device_register(mtd, NULL, 0)) {
err_msg("cannot add MTD device");
kfree(mtd->name);
kfree(gluebi);
return -ENFILE;
}
mutex_lock(&devices_mutex);
list_add_tail(&gluebi->list, &gluebi_devices);
mutex_unlock(&devices_mutex);
return 0;
}
/**
* gluebi_remove - remove a gluebi device.
* @vi: UBI volume description object
*
* This function is called when an UBI volume is removed and it removes
* corresponding fake MTD device. Returns zero in case of success and a
* negative error code in case of failure.
*/
static int gluebi_remove(struct ubi_volume_info *vi)
{
int err = 0;
struct mtd_info *mtd;
struct gluebi_device *gluebi;
mutex_lock(&devices_mutex);
gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (!gluebi) {
err_msg("got remove notification for unknown UBI device %d volume %d",
vi->ubi_num, vi->vol_id);
err = -ENOENT;
} else if (gluebi->refcnt)
err = -EBUSY;
else
list_del(&gluebi->list);
mutex_unlock(&devices_mutex);
if (err)
return err;
mtd = &gluebi->mtd;
err = mtd_device_unregister(mtd);
if (err) {
err_msg("cannot remove fake MTD device %d, UBI device %d, volume %d, error %d",
mtd->index, gluebi->ubi_num, gluebi->vol_id, err);
mutex_lock(&devices_mutex);
list_add_tail(&gluebi->list, &gluebi_devices);
mutex_unlock(&devices_mutex);
return err;
}
kfree(mtd->name);
kfree(gluebi);
return 0;
}
/**
* gluebi_updated - UBI volume was updated notifier.
* @vi: volume info structure
*
* This function is called every time an UBI volume is updated. It does nothing
* if te volume @vol is dynamic, and changes MTD device size if the
* volume is static. This is needed because static volumes cannot be read past
* data they contain. This function returns zero in case of success and a
* negative error code in case of error.
*/
static int gluebi_updated(struct ubi_volume_info *vi)
{
struct gluebi_device *gluebi;
mutex_lock(&devices_mutex);
gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (!gluebi) {
mutex_unlock(&devices_mutex);
err_msg("got update notification for unknown UBI device %d volume %d",
vi->ubi_num, vi->vol_id);
return -ENOENT;
}
if (vi->vol_type == UBI_STATIC_VOLUME)
gluebi->mtd.size = vi->used_bytes;
mutex_unlock(&devices_mutex);
return 0;
}
/**
* gluebi_resized - UBI volume was re-sized notifier.
* @vi: volume info structure
*
 * This function is called every time an UBI volume is re-sized. It changes the
* corresponding fake MTD device size. This function returns zero in case of
* success and a negative error code in case of error.
*/
static int gluebi_resized(struct ubi_volume_info *vi)
{
struct gluebi_device *gluebi;
mutex_lock(&devices_mutex);
gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (!gluebi) {
mutex_unlock(&devices_mutex);
err_msg("got resize notification for unknown UBI device %d volume %d",
vi->ubi_num, vi->vol_id);
return -ENOENT;
}
gluebi->mtd.size = vi->used_bytes;
mutex_unlock(&devices_mutex);
return 0;
}
/**
* gluebi_notify - UBI notification handler.
* @nb: registered notifier block
* @l: notification type
* @ns_ptr: pointer to the &struct ubi_notification object
*/
static int gluebi_notify(struct notifier_block *nb, unsigned long l,
void *ns_ptr)
{
struct ubi_notification *nt = ns_ptr;
switch (l) {
case UBI_VOLUME_ADDED:
gluebi_create(&nt->di, &nt->vi);
break;
case UBI_VOLUME_REMOVED:
gluebi_remove(&nt->vi);
break;
case UBI_VOLUME_RESIZED:
gluebi_resized(&nt->vi);
break;
case UBI_VOLUME_UPDATED:
gluebi_updated(&nt->vi);
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block gluebi_notifier = {
.notifier_call = gluebi_notify,
};
static int __init ubi_gluebi_init(void)
{
return ubi_register_volume_notifier(&gluebi_notifier, 0);
}
static void __exit ubi_gluebi_exit(void)
{
struct gluebi_device *gluebi, *g;
list_for_each_entry_safe(gluebi, g, &gluebi_devices, list) {
int err;
struct mtd_info *mtd = &gluebi->mtd;
err = mtd_device_unregister(mtd);
if (err)
err_msg("error %d while removing gluebi MTD device %d, UBI device %d, volume %d - ignoring",
err, mtd->index, gluebi->ubi_num,
gluebi->vol_id);
kfree(mtd->name);
kfree(gluebi);
}
ubi_unregister_volume_notifier(&gluebi_notifier);
}
module_init(ubi_gluebi_init);
module_exit(ubi_gluebi_exit);
MODULE_DESCRIPTION("MTD emulation layer over UBI volumes");
MODULE_AUTHOR("Artem Bityutskiy, Joern Engel");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/ubi/gluebi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) International Business Machines Corp., 2006
* Copyright (c) Nokia Corporation, 2006, 2007
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/*
* UBI input/output sub-system.
*
* This sub-system provides a uniform way to work with all kinds of the
* underlying MTD devices. It also implements handy functions for reading and
* writing UBI headers.
*
* We are trying to have a paranoid mindset and not to trust to what we read
* from the flash media in order to be more secure and robust. So this
* sub-system validates every single header it reads from the flash media.
*
* Some words about how the eraseblock headers are stored.
*
* The erase counter header is always stored at offset zero. By default, the
* VID header is stored after the EC header at the closest aligned offset
* (i.e. aligned to the minimum I/O unit size). Data starts next to the VID
* header at the closest aligned offset. But this default layout may be
* changed. For example, for different reasons (e.g., optimization) UBI may be
* asked to put the VID header at further offset, and even at an unaligned
* offset. Of course, if the offset of the VID header is unaligned, UBI adds
* proper padding in front of it. Data offset may also be changed but it has to
* be aligned.
*
* About minimal I/O units. In general, UBI assumes flash device model where
* there is only one minimal I/O unit size. E.g., in case of NOR flash it is 1,
* in case of NAND flash it is a NAND page, etc. This is reported by MTD in the
* @ubi->mtd->writesize field. But as an exception, UBI admits use of another
* (smaller) minimal I/O unit size for EC and VID headers to make it possible
* to do different optimizations.
*
* This is extremely useful in case of NAND flashes which admit of several
* write operations to one NAND page. In this case UBI can fit EC and VID
* headers at one NAND page. Thus, UBI may use "sub-page" size as the minimal
* I/O unit for the headers (the @ubi->hdrs_min_io_size field). But it still
* reports NAND page size (@ubi->min_io_size) as a minimal I/O unit for the UBI
* users.
*
* Example: some Samsung NANDs with 2KiB pages allow 4x 512-byte writes, so
* although the minimal I/O unit is 2K, UBI uses 512 bytes for EC and VID
* headers.
*
* Q: why not just to treat sub-page as a minimal I/O unit of this flash
* device, e.g., make @ubi->min_io_size = 512 in the example above?
*
* A: because when writing a sub-page, MTD still writes a full 2K page but the
* bytes which are not relevant to the sub-page are 0xFF. So, basically,
* writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page.
* Thus, we prefer to use sub-pages only for EC and VID headers.
*
* As it was noted above, the VID header may start at a non-aligned offset.
* For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page,
* the VID header may reside at offset 1984 which is the last 64 bytes of the
* last sub-page (EC header is always at offset zero). This causes some
* difficulties when reading and writing VID headers.
*
 * Suppose we have a 64-byte buffer and we read a VID header into it. We change
* the data and want to write this VID header out. As we can only write in
* 512-byte chunks, we have to allocate one more buffer and copy our VID header
* to offset 448 of this buffer.
*
* The I/O sub-system does the following trick in order to avoid this extra
* copy. It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID
* header and returns a pointer to offset @ubi->vid_hdr_shift of this buffer.
* When the VID header is being written out, it shifts the VID header pointer
* back and writes the whole sub-page.
*/
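/*
 * Worked example of the buffer trick described above, using the numbers
 * from the comment (2 KiB NAND page, 512-byte sub-page, VID header at
 * offset 1984):
 *   vid_hdr_offset   = 1984  (where the header really lives)
 *   vid_hdr_aloffset = 1536  (1984 rounded down to the 512-byte sub-page)
 *   vid_hdr_shift    = 1984 - 1536 = 448  (header offset inside the buffer)
 *   vid_hdr_alsize   = 512   (size of the buffer that is read/written)
 * So the I/O always transfers the whole 512-byte sub-page, while callers
 * see a pointer 448 bytes into it.
 */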
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/slab.h>
#include "ubi.h"
static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
const struct ubi_ec_hdr *ec_hdr);
static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
const struct ubi_vid_hdr *vid_hdr);
static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
int offset, int len);
/**
* ubi_io_read - read data from a physical eraseblock.
* @ubi: UBI device description object
* @buf: buffer where to store the read data
* @pnum: physical eraseblock number to read from
* @offset: offset within the physical eraseblock from where to read
* @len: how many bytes to read
*
* This function reads data from offset @offset of physical eraseblock @pnum
* and stores the read data in the @buf buffer. The following return codes are
* possible:
*
* o %0 if all the requested data were successfully read;
* o %UBI_IO_BITFLIPS if all the requested data were successfully read, but
* correctable bit-flips were detected; this is harmless but may indicate
* that this eraseblock may become bad soon (but do not have to);
* o %-EBADMSG if the MTD subsystem reported about data integrity problems, for
* example it can be an ECC error in case of NAND; this most probably means
* that the data is corrupted;
* o %-EIO if some I/O error occurred;
* o other negative error codes in case of other errors.
*/
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
int len)
{
int err, retries = 0;
size_t read;
loff_t addr;
dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
ubi_assert(len > 0);
err = self_check_not_bad(ubi, pnum);
if (err)
return err;
/*
* Deliberately corrupt the buffer to improve robustness. Indeed, if we
* do not do this, the following may happen:
* 1. The buffer contains data from previous operation, e.g., read from
* another PEB previously. The data looks like expected, e.g., if we
* just do not read anything and return - the caller would not
* notice this. E.g., if we are reading a VID header, the buffer may
* contain a valid VID header from another PEB.
* 2. The driver is buggy and returns us success or -EBADMSG or
* -EUCLEAN, but it does not actually put any data to the buffer.
*
* This may confuse UBI or upper layers - they may think the buffer
* contains valid data while in fact it is just old data. This is
* especially possible because UBI (and UBIFS) relies on CRC, and
* treats data as correct even in case of ECC errors if the CRC is
* correct.
*
* Try to prevent this situation by changing the first byte of the
* buffer.
*/
*((uint8_t *)buf) ^= 0xFF;
addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err) {
const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
if (mtd_is_bitflip(err)) {
/*
* -EUCLEAN is reported if there was a bit-flip which
* was corrected, so this is harmless.
*
* We do not report about it here unless debugging is
* enabled. A corresponding message will be printed
 * later, when it has been scrubbed.
*/
ubi_msg(ubi, "fixable bit-flip detected at PEB %d",
pnum);
ubi_assert(len == read);
return UBI_IO_BITFLIPS;
}
if (retries++ < UBI_IO_RETRIES) {
ubi_warn(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
err, errstr, len, pnum, offset, read);
yield();
goto retry;
}
ubi_err(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
err, errstr, len, pnum, offset, read);
dump_stack();
/*
* The driver should never return -EBADMSG if it failed to read
* all the requested data. But some buggy drivers might do
* this, so we change it to -EIO.
*/
if (read != len && mtd_is_eccerr(err)) {
ubi_assert(0);
err = -EIO;
}
} else {
ubi_assert(len == read);
if (ubi_dbg_is_bitflip(ubi)) {
dbg_gen("bit-flip (emulated)");
err = UBI_IO_BITFLIPS;
}
}
return err;
}
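/*
 * Illustrative sketch (not part of the original driver): how callers
 * typically consume the return codes documented above. The helper name
 * is hypothetical; scrubbing on UBI_IO_BITFLIPS is what the wear-leveling
 * sub-system does with such PEBs elsewhere in UBI.
 */
static int __maybe_unused example_consume_read(struct ubi_device *ubi,
					       void *buf, int pnum,
					       int offset, int len)
{
	int err = ubi_io_read(ubi, buf, pnum, offset, len);

	if (err == UBI_IO_BITFLIPS)
		return 0;	/* data is good, but the PEB wants scrubbing */
	if (mtd_is_eccerr(err))
		return -EBADMSG; /* data is most probably corrupted */
	return err;		/* 0 on success, negative error otherwise */
}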
/**
* ubi_io_write - write data to a physical eraseblock.
* @ubi: UBI device description object
* @buf: buffer with the data to write
* @pnum: physical eraseblock number to write to
* @offset: offset within the physical eraseblock where to write
* @len: how many bytes to write
*
* This function writes @len bytes of data from buffer @buf to offset @offset
* of physical eraseblock @pnum. If all the data were successfully written,
* zero is returned. If an error occurred, this function returns a negative
* error code. If %-EIO is returned, the physical eraseblock most probably went
* bad.
*
* Note, in case of an error, it is possible that something was still written
* to the flash media, but may be some garbage.
*/
int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
int len)
{
int err;
size_t written;
loff_t addr;
dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
ubi_assert(offset % ubi->hdrs_min_io_size == 0);
ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);
if (ubi->ro_mode) {
ubi_err(ubi, "read-only mode");
return -EROFS;
}
err = self_check_not_bad(ubi, pnum);
if (err)
return err;
/* The area we are writing to has to contain all 0xFF bytes */
err = ubi_self_check_all_ff(ubi, pnum, offset, len);
if (err)
return err;
if (offset >= ubi->leb_start) {
/*
* We write to the data area of the physical eraseblock. Make
* sure it has valid EC and VID headers.
*/
err = self_check_peb_ec_hdr(ubi, pnum);
if (err)
return err;
err = self_check_peb_vid_hdr(ubi, pnum);
if (err)
return err;
}
if (ubi_dbg_is_write_failure(ubi)) {
ubi_err(ubi, "cannot write %d bytes to PEB %d:%d (emulated)",
len, pnum, offset);
dump_stack();
return -EIO;
}
addr = (loff_t)pnum * ubi->peb_size + offset;
err = mtd_write(ubi->mtd, addr, len, &written, buf);
if (err) {
ubi_err(ubi, "error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
err, len, pnum, offset, written);
dump_stack();
ubi_dump_flash(ubi, pnum, offset, len);
} else
ubi_assert(written == len);
if (!err) {
err = self_check_write(ubi, buf, pnum, offset, len);
if (err)
return err;
/*
* Since we always write sequentially, the rest of the PEB has
* to contain only 0xFF bytes.
*/
offset += len;
len = ubi->peb_size - offset;
if (len)
err = ubi_self_check_all_ff(ubi, pnum, offset, len);
}
return err;
}
/**
* do_sync_erase - synchronously erase a physical eraseblock.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to erase
*
* This function synchronously erases physical eraseblock @pnum and returns
* zero in case of success and a negative error code in case of failure. If
* %-EIO is returned, the physical eraseblock most probably went bad.
*/
static int do_sync_erase(struct ubi_device *ubi, int pnum)
{
int err, retries = 0;
struct erase_info ei;
dbg_io("erase PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
if (ubi->ro_mode) {
ubi_err(ubi, "read-only mode");
return -EROFS;
}
retry:
memset(&ei, 0, sizeof(struct erase_info));
ei.addr = (loff_t)pnum * ubi->peb_size;
ei.len = ubi->peb_size;
err = mtd_erase(ubi->mtd, &ei);
if (err) {
if (retries++ < UBI_IO_RETRIES) {
ubi_warn(ubi, "error %d while erasing PEB %d, retry",
err, pnum);
yield();
goto retry;
}
ubi_err(ubi, "cannot erase PEB %d, error %d", pnum, err);
dump_stack();
return err;
}
err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
if (err)
return err;
if (ubi_dbg_is_erase_failure(ubi)) {
ubi_err(ubi, "cannot erase PEB %d (emulated)", pnum);
return -EIO;
}
return 0;
}
/* Patterns to write to a physical eraseblock when torturing it */
static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
/**
* torture_peb - test a supposedly bad physical eraseblock.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to test
*
* This function returns %-EIO if the physical eraseblock did not pass the
* test, a positive number of erase operations done if the test was
* successfully passed, and other negative error codes in case of other errors.
*/
static int torture_peb(struct ubi_device *ubi, int pnum)
{
int err, i, patt_count;
ubi_msg(ubi, "run torture test for PEB %d", pnum);
patt_count = ARRAY_SIZE(patterns);
ubi_assert(patt_count > 0);
mutex_lock(&ubi->buf_mutex);
for (i = 0; i < patt_count; i++) {
err = do_sync_erase(ubi, pnum);
if (err)
goto out;
/* Make sure the PEB contains only 0xFF bytes */
err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
if (err)
goto out;
err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
if (err == 0) {
ubi_err(ubi, "erased PEB %d, but a non-0xFF byte found",
pnum);
err = -EIO;
goto out;
}
/* Write a pattern and check it */
memset(ubi->peb_buf, patterns[i], ubi->peb_size);
err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
if (err)
goto out;
memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
if (err)
goto out;
err = ubi_check_pattern(ubi->peb_buf, patterns[i],
ubi->peb_size);
if (err == 0) {
ubi_err(ubi, "pattern %x checking failed for PEB %d",
patterns[i], pnum);
err = -EIO;
goto out;
}
}
err = patt_count;
ubi_msg(ubi, "PEB %d passed torture test, do not mark it as bad", pnum);
out:
mutex_unlock(&ubi->buf_mutex);
if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
/*
* If a bit-flip or data integrity error was detected, the test
* has not passed because it happened on a freshly erased
* physical eraseblock which means something is wrong with it.
*/
ubi_err(ubi, "read problems on freshly erased PEB %d, must be bad",
pnum);
err = -EIO;
}
return err;
}
/**
* nor_erase_prepare - prepare a NOR flash PEB for erasure.
* @ubi: UBI device description object
* @pnum: physical eraseblock number to prepare
*
 * NOR flashes, or at least some of them, have a peculiar embedded PEB erasure
* algorithm: the PEB is first filled with zeroes, then it is erased. And
* filling with zeroes starts from the end of the PEB. This was observed with
* Spansion S29GL512N NOR flash.
*
* This means that in case of a power cut we may end up with intact data at the
* beginning of the PEB, and all zeroes at the end of PEB. In other words, the
* EC and VID headers are OK, but a large chunk of data at the end of PEB is
* zeroed. This makes UBI mistakenly treat this PEB as used and associate it
* with an LEB, which leads to subsequent failures (e.g., UBIFS fails).
*
* This function is called before erasing NOR PEBs and it zeroes out EC and VID
* magic numbers in order to invalidate them and prevent the failures. Returns
* zero in case of success and a negative error code in case of failure.
*/
static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
{
int err;
size_t written;
loff_t addr;
uint32_t data = 0;
struct ubi_ec_hdr ec_hdr;
struct ubi_vid_io_buf vidb;
/*
* Note, we cannot generally define VID header buffers on stack,
* because of the way we deal with these buffers (see the header
* comment in this file). But we know this is a NOR-specific piece of
* code, so we can do this. But yes, this is error-prone and we should
* (pre-)allocate a VID header buffer instead.
*/
struct ubi_vid_hdr vid_hdr;
/*
* If VID or EC is valid, we have to corrupt them before erasing.
* It is important to first invalidate the EC header, and then the VID
* header. Otherwise a power cut may lead to valid EC header and
* invalid VID header, in which case UBI will treat this PEB as
* corrupted and will try to preserve it, and print scary warnings.
*/
addr = (loff_t)pnum * ubi->peb_size;
err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
err != UBI_IO_FF) {
err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
if (err)
goto error;
}
ubi_init_vid_buf(ubi, &vidb, &vid_hdr);
ubi_assert(&vid_hdr == ubi_get_vid_hdr(&vidb));
err = ubi_io_read_vid_hdr(ubi, pnum, &vidb, 0);
if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
err != UBI_IO_FF) {
addr += ubi->vid_hdr_aloffset;
err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
if (err)
goto error;
}
return 0;
error:
/*
* The PEB contains a valid VID or EC header, but we cannot invalidate
* it. Supposedly the flash media or the driver is screwed up, so
* return an error.
*/
ubi_err(ubi, "cannot invalidate PEB %d, write returned %d", pnum, err);
ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
return -EIO;
}
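/*
 * Background note (an assumption about NOR behaviour, not driver code):
 * NOR program operations can only clear bits (1 -> 0), so overwriting a
 * header's magic number with a 32-bit zero always succeeds regardless of
 * the current contents - this is what lets nor_erase_prepare() invalidate
 * headers in place. A minimal illustration:
 */
#if 0
/* Programming NOR effectively ANDs the new data into what is on flash. */
static u32 example_nor_program(u32 on_flash, u32 to_write)
{
	return on_flash & to_write;	/* writing 0 always yields 0 */
}
#endif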
/**
* ubi_io_sync_erase - synchronously erase a physical eraseblock.
* @ubi: UBI device description object
* @pnum: physical eraseblock number to erase
* @torture: if this physical eraseblock has to be tortured
*
* This function synchronously erases physical eraseblock @pnum. If @torture
* flag is not zero, the physical eraseblock is checked by means of writing
* different patterns to it and reading them back. If the torturing is enabled,
* the physical eraseblock is erased more than once.
*
* This function returns the number of erasures made in case of success, %-EIO
* if the erasure failed or the torturing test failed, and other negative error
* codes in case of other errors. Note, %-EIO means that the physical
* eraseblock is bad.
*/
int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
{
int err, ret = 0;
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
err = self_check_not_bad(ubi, pnum);
if (err != 0)
return err;
if (ubi->ro_mode) {
ubi_err(ubi, "read-only mode");
return -EROFS;
}
/*
* If the flash is ECC-ed then we have to erase the ECC block before we
* can write to it. But the write is in preparation for an erase in the
* first place. This means we cannot zero out EC and VID before the
* erase and we just have to hope the flash starts erasing from the
* start of the page.
*/
if (ubi->nor_flash && ubi->mtd->writesize == 1) {
err = nor_erase_prepare(ubi, pnum);
if (err)
return err;
}
if (torture) {
ret = torture_peb(ubi, pnum);
if (ret < 0)
return ret;
}
err = do_sync_erase(ubi, pnum);
if (err)
return err;
return ret + 1;
}
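/*
 * Usage sketch (illustrative, not part of this file): the wear-levelling
 * code calls ubi_io_sync_erase() roughly like this and accounts the
 * returned number of erasures into the PEB's erase counter. The helper
 * name is hypothetical.
 */
#if 0
static int example_erase_and_count(struct ubi_device *ubi, int pnum,
				   unsigned long long *ec, int torture)
{
	int err = ubi_io_sync_erase(ubi, pnum, torture);

	if (err < 0)
		return err;	/* -EIO usually means "mark the PEB bad" */
	*ec += err;		/* 'err' is the number of erase cycles done */
	return 0;
}
#endif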
/**
* ubi_io_is_bad - check if a physical eraseblock is bad.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
*
* This function returns a positive number if the physical eraseblock is bad,
* zero if not, and a negative error code if an error occurred.
*/
int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
{
struct mtd_info *mtd = ubi->mtd;
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
if (ubi->bad_allowed) {
int ret;
ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
if (ret < 0)
ubi_err(ubi, "error %d while checking if PEB %d is bad",
ret, pnum);
else if (ret)
dbg_io("PEB %d is bad", pnum);
return ret;
}
return 0;
}
/**
* ubi_io_mark_bad - mark a physical eraseblock as bad.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to mark
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
{
int err;
struct mtd_info *mtd = ubi->mtd;
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
if (ubi->ro_mode) {
ubi_err(ubi, "read-only mode");
return -EROFS;
}
if (!ubi->bad_allowed)
return 0;
err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
if (err)
ubi_err(ubi, "cannot mark PEB %d bad, error %d", pnum, err);
return err;
}
/**
* validate_ec_hdr - validate an erase counter header.
* @ubi: UBI device description object
* @ec_hdr: the erase counter header to check
*
* This function returns zero if the erase counter header is OK, and %1 if
* not.
*/
static int validate_ec_hdr(const struct ubi_device *ubi,
const struct ubi_ec_hdr *ec_hdr)
{
long long ec;
int vid_hdr_offset, leb_start;
ec = be64_to_cpu(ec_hdr->ec);
vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
leb_start = be32_to_cpu(ec_hdr->data_offset);
if (ec_hdr->version != UBI_VERSION) {
ubi_err(ubi, "node with incompatible UBI version found: this UBI version is %d, image version is %d",
UBI_VERSION, (int)ec_hdr->version);
goto bad;
}
if (vid_hdr_offset != ubi->vid_hdr_offset) {
ubi_err(ubi, "bad VID header offset %d, expected %d",
vid_hdr_offset, ubi->vid_hdr_offset);
goto bad;
}
if (leb_start != ubi->leb_start) {
ubi_err(ubi, "bad data offset %d, expected %d",
leb_start, ubi->leb_start);
goto bad;
}
if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
ubi_err(ubi, "bad erase counter %lld", ec);
goto bad;
}
return 0;
bad:
ubi_err(ubi, "bad EC header");
ubi_dump_ec_hdr(ec_hdr);
dump_stack();
return 1;
}
/**
* ubi_io_read_ec_hdr - read and check an erase counter header.
* @ubi: UBI device description object
* @pnum: physical eraseblock to read from
* @ec_hdr: a &struct ubi_ec_hdr object where to store the read erase counter
* header
* @verbose: be verbose if the header is corrupted or was not found
*
* This function reads erase counter header from physical eraseblock @pnum and
* stores it in @ec_hdr. This function also checks CRC checksum of the read
* erase counter header. The following codes may be returned:
*
* o %0 if the CRC checksum is correct and the header was successfully read;
* o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
* and corrected by the flash driver; this is harmless but may indicate that
* this eraseblock may become bad soon (though it may not);
* o %UBI_IO_BAD_HDR if the erase counter header is corrupted (a CRC error);
* o %UBI_IO_BAD_HDR_EBADMSG is the same as %UBI_IO_BAD_HDR, but there also was
* a data integrity error (uncorrectable ECC error in case of NAND);
* o %UBI_IO_FF if only 0xFF bytes were read (the PEB is supposedly empty)
* o a negative error code in case of failure.
*/
int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
struct ubi_ec_hdr *ec_hdr, int verbose)
{
int err, read_err;
uint32_t crc, magic, hdr_crc;
dbg_io("read EC header from PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
if (read_err) {
if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
return read_err;
/*
* We read all the data, but either a correctable bit-flip
* occurred, or MTD reported a data integrity error
* (uncorrectable ECC error in case of NAND). The former is
* harmless, the latter may mean that the read data is
* corrupted. But we have a CRC checksum and we will detect
* this. If the EC header is still OK, we just report it as if
* there was a bit-flip, to force scrubbing.
*/
}
magic = be32_to_cpu(ec_hdr->magic);
if (magic != UBI_EC_HDR_MAGIC) {
if (mtd_is_eccerr(read_err))
return UBI_IO_BAD_HDR_EBADMSG;
/*
* The magic field is wrong. Let's check if we have read all
* 0xFF. If yes, this physical eraseblock is assumed to be
* empty.
*/
if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
/* The physical eraseblock is supposedly empty */
if (verbose)
ubi_warn(ubi, "no EC header found at PEB %d, only 0xFF bytes",
pnum);
dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
pnum);
if (!read_err)
return UBI_IO_FF;
else
return UBI_IO_FF_BITFLIPS;
}
/*
* This is not a valid erase counter header, and these are not
* 0xFF bytes. Report that the header is corrupted.
*/
if (verbose) {
ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
pnum, magic, UBI_EC_HDR_MAGIC);
ubi_dump_ec_hdr(ec_hdr);
}
dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
pnum, magic, UBI_EC_HDR_MAGIC);
return UBI_IO_BAD_HDR;
}
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
if (hdr_crc != crc) {
if (verbose) {
ubi_warn(ubi, "bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
pnum, crc, hdr_crc);
ubi_dump_ec_hdr(ec_hdr);
}
dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
pnum, crc, hdr_crc);
if (!read_err)
return UBI_IO_BAD_HDR;
else
return UBI_IO_BAD_HDR_EBADMSG;
}
/* And of course validate what has just been read from the media */
err = validate_ec_hdr(ubi, ec_hdr);
if (err) {
ubi_err(ubi, "validation failed for PEB %d", pnum);
return -EINVAL;
}
/*
* If there was %-EBADMSG, but the header CRC is still OK, report about
* a bit-flip to force scrubbing on this PEB.
*/
return read_err ? UBI_IO_BITFLIPS : 0;
}
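/*
 * Sketch of a caller (illustrative only): code such as the attach logic
 * can distinguish the documented return codes like this. The function
 * name is hypothetical.
 */
#if 0
static int example_classify_ec(struct ubi_device *ubi, int pnum,
			       struct ubi_ec_hdr *hdr)
{
	int err = ubi_io_read_ec_hdr(ubi, pnum, hdr, 0);

	switch (err) {
	case 0:				/* good header */
	case UBI_IO_BITFLIPS:		/* good header, schedule scrubbing */
		return 0;
	case UBI_IO_FF:			/* empty PEB, free for use */
	case UBI_IO_FF_BITFLIPS:
		return 0;
	case UBI_IO_BAD_HDR:
	case UBI_IO_BAD_HDR_EBADMSG:
		return -EIO;		/* treat the header as corrupted */
	default:
		return err;		/* a real I/O error */
	}
}
#endif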
/**
* ubi_io_write_ec_hdr - write an erase counter header.
* @ubi: UBI device description object
* @pnum: physical eraseblock to write to
* @ec_hdr: the erase counter header to write
*
* This function writes erase counter header described by @ec_hdr to physical
* eraseblock @pnum. It also fills most fields of @ec_hdr before writing, so
* the caller does not have to fill them. Callers must only fill the @ec_hdr->ec
* field.
*
* This function returns zero in case of success and a negative error code in
* case of failure. If %-EIO is returned, the physical eraseblock most probably
* went bad.
*/
int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
struct ubi_ec_hdr *ec_hdr)
{
int err;
uint32_t crc;
dbg_io("write EC header to PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
ec_hdr->version = UBI_VERSION;
ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
ec_hdr->hdr_crc = cpu_to_be32(crc);
err = self_check_ec_hdr(ubi, pnum, ec_hdr);
if (err)
return err;
if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
return -EROFS;
err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
return err;
}
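/*
 * Sketch (illustrative): per the kernel-doc above, a caller only fills
 * @ec; ubi_io_write_ec_hdr() fills the rest and computes the CRC. The
 * helper below is hypothetical.
 */
#if 0
static int example_write_ec(struct ubi_device *ubi, int pnum, long long ec)
{
	struct ubi_ec_hdr *hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	int err;

	if (!hdr)
		return -ENOMEM;
	hdr->ec = cpu_to_be64(ec);	/* the only field the caller sets */
	err = ubi_io_write_ec_hdr(ubi, pnum, hdr);
	kfree(hdr);
	return err;
}
#endif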
/**
* validate_vid_hdr - validate a volume identifier header.
* @ubi: UBI device description object
* @vid_hdr: the volume identifier header to check
*
* This function checks the data stored in the volume identifier header
* @vid_hdr. Returns zero if the VID header is OK and %1 if not.
*/
static int validate_vid_hdr(const struct ubi_device *ubi,
const struct ubi_vid_hdr *vid_hdr)
{
int vol_type = vid_hdr->vol_type;
int copy_flag = vid_hdr->copy_flag;
int vol_id = be32_to_cpu(vid_hdr->vol_id);
int lnum = be32_to_cpu(vid_hdr->lnum);
int compat = vid_hdr->compat;
int data_size = be32_to_cpu(vid_hdr->data_size);
int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
int data_pad = be32_to_cpu(vid_hdr->data_pad);
int data_crc = be32_to_cpu(vid_hdr->data_crc);
int usable_leb_size = ubi->leb_size - data_pad;
if (copy_flag != 0 && copy_flag != 1) {
ubi_err(ubi, "bad copy_flag");
goto bad;
}
if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
data_pad < 0) {
ubi_err(ubi, "negative values");
goto bad;
}
if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
ubi_err(ubi, "bad vol_id");
goto bad;
}
if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
ubi_err(ubi, "bad compat");
goto bad;
}
if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
compat != UBI_COMPAT_REJECT) {
ubi_err(ubi, "bad compat");
goto bad;
}
if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
ubi_err(ubi, "bad vol_type");
goto bad;
}
if (data_pad >= ubi->leb_size / 2) {
ubi_err(ubi, "bad data_pad");
goto bad;
}
if (data_size > ubi->leb_size) {
ubi_err(ubi, "bad data_size");
goto bad;
}
if (vol_type == UBI_VID_STATIC) {
/*
* Although from a high-level point of view static volumes may
* contain zero bytes of data, no VID header can contain
* zeros in these fields, because empty volumes do not have
* mapped logical eraseblocks.
*/
if (used_ebs == 0) {
ubi_err(ubi, "zero used_ebs");
goto bad;
}
if (data_size == 0) {
ubi_err(ubi, "zero data_size");
goto bad;
}
if (lnum < used_ebs - 1) {
if (data_size != usable_leb_size) {
ubi_err(ubi, "bad data_size");
goto bad;
}
} else if (lnum > used_ebs - 1) {
ubi_err(ubi, "too high lnum");
goto bad;
}
} else {
if (copy_flag == 0) {
if (data_crc != 0) {
ubi_err(ubi, "non-zero data CRC");
goto bad;
}
if (data_size != 0) {
ubi_err(ubi, "non-zero data_size");
goto bad;
}
} else {
if (data_size == 0) {
ubi_err(ubi, "zero data_size of copy");
goto bad;
}
}
if (used_ebs != 0) {
ubi_err(ubi, "bad used_ebs");
goto bad;
}
}
return 0;
bad:
ubi_err(ubi, "bad VID header");
ubi_dump_vid_hdr(vid_hdr);
dump_stack();
return 1;
}
/**
* ubi_io_read_vid_hdr - read and check a volume identifier header.
* @ubi: UBI device description object
* @pnum: physical eraseblock number to read from
* @vidb: the volume identifier buffer to store data in
* @verbose: be verbose if the header is corrupted or wasn't found
*
* This function reads the volume identifier header from physical eraseblock
* @pnum and stores it in @vidb. It also checks CRC checksum of the read
* volume identifier header. The error codes are the same as in
* 'ubi_io_read_ec_hdr()'.
*
* Note, the implementation of this function is also very similar to
* 'ubi_io_read_ec_hdr()', so refer to the commentaries in 'ubi_io_read_ec_hdr()'.
*/
int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_io_buf *vidb, int verbose)
{
int err, read_err;
uint32_t crc, magic, hdr_crc;
struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
void *p = vidb->buffer;
dbg_io("read VID header from PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_shift + UBI_VID_HDR_SIZE);
if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
return read_err;
magic = be32_to_cpu(vid_hdr->magic);
if (magic != UBI_VID_HDR_MAGIC) {
if (mtd_is_eccerr(read_err))
return UBI_IO_BAD_HDR_EBADMSG;
if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
if (verbose)
ubi_warn(ubi, "no VID header found at PEB %d, only 0xFF bytes",
pnum);
dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
pnum);
if (!read_err)
return UBI_IO_FF;
else
return UBI_IO_FF_BITFLIPS;
}
if (verbose) {
ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
pnum, magic, UBI_VID_HDR_MAGIC);
ubi_dump_vid_hdr(vid_hdr);
}
dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
pnum, magic, UBI_VID_HDR_MAGIC);
return UBI_IO_BAD_HDR;
}
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
if (hdr_crc != crc) {
if (verbose) {
ubi_warn(ubi, "bad CRC at PEB %d, calculated %#08x, read %#08x",
pnum, crc, hdr_crc);
ubi_dump_vid_hdr(vid_hdr);
}
dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
pnum, crc, hdr_crc);
if (!read_err)
return UBI_IO_BAD_HDR;
else
return UBI_IO_BAD_HDR_EBADMSG;
}
err = validate_vid_hdr(ubi, vid_hdr);
if (err) {
ubi_err(ubi, "validation failed for PEB %d", pnum);
return -EINVAL;
}
return read_err ? UBI_IO_BITFLIPS : 0;
}
/**
* ubi_io_write_vid_hdr - write a volume identifier header.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to write to
* @vidb: the volume identifier buffer to write
*
* This function writes the volume identifier header described by @vidb to
* physical eraseblock @pnum. This function automatically fills the
* @vidb->hdr->magic and the @vidb->hdr->version fields, as well as calculates
* header CRC checksum and stores it at vidb->hdr->hdr_crc.
*
* This function returns zero in case of success and a negative error code in
* case of failure. If %-EIO is returned, the physical eraseblock probably went
* bad.
*/
int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_io_buf *vidb)
{
struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
int err;
uint32_t crc;
void *p = vidb->buffer;
dbg_io("write VID header to PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
err = self_check_peb_ec_hdr(ubi, pnum);
if (err)
return err;
vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
vid_hdr->version = UBI_VERSION;
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
vid_hdr->hdr_crc = cpu_to_be32(crc);
err = self_check_vid_hdr(ubi, pnum, vid_hdr);
if (err)
return err;
if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
return -EROFS;
err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
return err;
}
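/*
 * Sketch (illustrative): writing a VID header normally goes through a
 * dynamically allocated ubi_vid_io_buf (compare the stack-based exception
 * in nor_erase_prepare() above). Fields such as sqnum are omitted for
 * brevity; the helper name is hypothetical.
 */
#if 0
static int example_write_vid(struct ubi_device *ubi, int pnum,
			     int vol_id, int lnum)
{
	struct ubi_vid_io_buf *vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	struct ubi_vid_hdr *vid_hdr;
	int err;

	if (!vidb)
		return -ENOMEM;
	vid_hdr = ubi_get_vid_hdr(vidb);
	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	err = ubi_io_write_vid_hdr(ubi, pnum, vidb);
	ubi_free_vid_buf(vidb);
	return err;
}
#endif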
/**
* self_check_not_bad - ensure that a physical eraseblock is not bad.
* @ubi: UBI device description object
* @pnum: physical eraseblock number to check
*
* This function returns zero if the physical eraseblock is good, %-EINVAL if
* it is bad and a negative error code if an error occurred.
*/
static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
{
int err;
if (!ubi_dbg_chk_io(ubi))
return 0;
err = ubi_io_is_bad(ubi, pnum);
if (!err)
return err;
ubi_err(ubi, "self-check failed for PEB %d", pnum);
dump_stack();
return err > 0 ? -EINVAL : err;
}
/**
* self_check_ec_hdr - check if an erase counter header is all right.
* @ubi: UBI device description object
* @pnum: physical eraseblock number the erase counter header belongs to
* @ec_hdr: the erase counter header to check
*
* This function returns zero if the erase counter header contains valid
* values, and %-EINVAL if not.
*/
static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
const struct ubi_ec_hdr *ec_hdr)
{
int err;
uint32_t magic;
if (!ubi_dbg_chk_io(ubi))
return 0;
magic = be32_to_cpu(ec_hdr->magic);
if (magic != UBI_EC_HDR_MAGIC) {
ubi_err(ubi, "bad magic %#08x, must be %#08x",
magic, UBI_EC_HDR_MAGIC);
goto fail;
}
err = validate_ec_hdr(ubi, ec_hdr);
if (err) {
ubi_err(ubi, "self-check failed for PEB %d", pnum);
goto fail;
}
return 0;
fail:
ubi_dump_ec_hdr(ec_hdr);
dump_stack();
return -EINVAL;
}
/**
* self_check_peb_ec_hdr - check erase counter header.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
*
* This function returns zero if the erase counter header is all right and
* a negative error code if not or if an error occurred.
*/
static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
{
int err;
uint32_t crc, hdr_crc;
struct ubi_ec_hdr *ec_hdr;
if (!ubi_dbg_chk_io(ubi))
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
if (!ec_hdr)
return -ENOMEM;
err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
goto exit;
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
if (hdr_crc != crc) {
ubi_err(ubi, "bad CRC, calculated %#08x, read %#08x",
crc, hdr_crc);
ubi_err(ubi, "self-check failed for PEB %d", pnum);
ubi_dump_ec_hdr(ec_hdr);
dump_stack();
err = -EINVAL;
goto exit;
}
err = self_check_ec_hdr(ubi, pnum, ec_hdr);
exit:
kfree(ec_hdr);
return err;
}
/**
* self_check_vid_hdr - check that a volume identifier header is all right.
* @ubi: UBI device description object
* @pnum: physical eraseblock number the volume identifier header belongs to
* @vid_hdr: the volume identifier header to check
*
* This function returns zero if the volume identifier header is all right, and
* %-EINVAL if not.
*/
static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
const struct ubi_vid_hdr *vid_hdr)
{
int err;
uint32_t magic;
if (!ubi_dbg_chk_io(ubi))
return 0;
magic = be32_to_cpu(vid_hdr->magic);
if (magic != UBI_VID_HDR_MAGIC) {
ubi_err(ubi, "bad VID header magic %#08x at PEB %d, must be %#08x",
magic, pnum, UBI_VID_HDR_MAGIC);
goto fail;
}
err = validate_vid_hdr(ubi, vid_hdr);
if (err) {
ubi_err(ubi, "self-check failed for PEB %d", pnum);
goto fail;
}
return err;
fail:
ubi_err(ubi, "self-check failed for PEB %d", pnum);
ubi_dump_vid_hdr(vid_hdr);
dump_stack();
return -EINVAL;
}
/**
* self_check_peb_vid_hdr - check volume identifier header.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
*
* This function returns zero if the volume identifier header is all right,
* and a negative error code if not or if an error occurred.
*/
static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
{
int err;
uint32_t crc, hdr_crc;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
void *p;
if (!ubi_dbg_chk_io(ubi))
return 0;
vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!vidb)
return -ENOMEM;
vid_hdr = ubi_get_vid_hdr(vidb);
p = vidb->buffer;
err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
goto exit;
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
if (hdr_crc != crc) {
ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
pnum, crc, hdr_crc);
ubi_err(ubi, "self-check failed for PEB %d", pnum);
ubi_dump_vid_hdr(vid_hdr);
dump_stack();
err = -EINVAL;
goto exit;
}
err = self_check_vid_hdr(ubi, pnum, vid_hdr);
exit:
ubi_free_vid_buf(vidb);
return err;
}
/**
* self_check_write - make sure write succeeded.
* @ubi: UBI device description object
* @buf: buffer with data which were written
* @pnum: physical eraseblock number the data were written to
* @offset: offset within the physical eraseblock the data were written to
* @len: how many bytes were written
*
* This function reads data which was recently written and compares it with
* the original data buffer - the data has to match. Returns zero if the data
* match and a negative error code if not or in case of failure.
*/
static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
int offset, int len)
{
int err, i;
size_t read;
void *buf1;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
if (!ubi_dbg_chk_io(ubi))
return 0;
buf1 = __vmalloc(len, GFP_NOFS);
if (!buf1) {
ubi_err(ubi, "cannot allocate memory to check writes");
return 0;
}
err = mtd_read(ubi->mtd, addr, len, &read, buf1);
if (err && !mtd_is_bitflip(err))
goto out_free;
for (i = 0; i < len; i++) {
uint8_t c = ((uint8_t *)buf)[i];
uint8_t c1 = ((uint8_t *)buf1)[i];
int dump_len;
if (c == c1)
continue;
ubi_err(ubi, "self-check failed for PEB %d:%d, len %d",
pnum, offset, len);
ubi_msg(ubi, "data differ at position %d", i);
dump_len = max_t(int, 128, len - i);
ubi_msg(ubi, "hex dump of the original buffer from %d to %d",
i, i + dump_len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
buf + i, dump_len, 1);
ubi_msg(ubi, "hex dump of the read buffer from %d to %d",
i, i + dump_len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
buf1 + i, dump_len, 1);
dump_stack();
err = -EINVAL;
goto out_free;
}
vfree(buf1);
return 0;
out_free:
vfree(buf1);
return err;
}
/**
* ubi_self_check_all_ff - check that a region of flash is empty.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
* @offset: the starting offset within the physical eraseblock to check
* @len: the length of the region to check
*
* This function returns zero if only 0xFF bytes are present at offset
* @offset of the physical eraseblock @pnum, and a negative error code if not
* or if an error occurred.
*/
int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
{
size_t read;
int err;
void *buf;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
if (!ubi_dbg_chk_io(ubi))
return 0;
buf = __vmalloc(len, GFP_NOFS);
if (!buf) {
ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
return 0;
}
err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err && !mtd_is_bitflip(err)) {
ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
err, len, pnum, offset, read);
goto error;
}
err = ubi_check_pattern(buf, 0xFF, len);
if (err == 0) {
ubi_err(ubi, "flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
pnum, offset, len);
goto fail;
}
vfree(buf);
return 0;
fail:
ubi_err(ubi, "self-check failed for PEB %d", pnum);
ubi_msg(ubi, "hex dump of the %d-%d region", offset, offset + len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
err = -EINVAL;
error:
dump_stack();
vfree(buf);
return err;
}
| linux-master | drivers/mtd/ubi/io.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) International Business Machines Corp., 2006
* Copyright (c) Nokia Corporation, 2006
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*
* Jan 2007: Alexander Schmidt, hacked per-volume update.
*/
/*
* This file contains implementation of the volume update and atomic LEB change
* functionality.
*
* The update operation is based on the per-volume update marker which is
* stored in the volume table. The update marker is set before the update
* starts, and removed after the update has been finished. So if the update was
* interrupted by an unclean reboot or for some other reason, the update
* marker stays on the flash media and UBI finds it when it attaches the MTD
* device next time. If the update marker is set for a volume, the volume is
* treated as damaged and most I/O operations are prohibited. Only a new update
* operation is allowed.
*
* Note, in general it is possible to implement the update operation as a
* transaction with a roll-back capability.
*/
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/math64.h>
#include "ubi.h"
/**
* set_update_marker - set update marker.
* @ubi: UBI device description object
* @vol: volume description object
*
* This function sets the update marker flag for volume @vol. Returns zero
* in case of success and a negative error code in case of failure.
*/
static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
{
int err;
struct ubi_vtbl_record vtbl_rec;
dbg_gen("set update marker for volume %d", vol->vol_id);
if (vol->upd_marker) {
ubi_assert(ubi->vtbl[vol->vol_id].upd_marker);
dbg_gen("already set");
return 0;
}
vtbl_rec = ubi->vtbl[vol->vol_id];
vtbl_rec.upd_marker = 1;
mutex_lock(&ubi->device_mutex);
err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
vol->upd_marker = 1;
mutex_unlock(&ubi->device_mutex);
return err;
}
/**
* clear_update_marker - clear update marker.
* @ubi: UBI device description object
* @vol: volume description object
* @bytes: new data size in bytes
*
* This function clears the update marker for volume @vol, sets new volume
* data size and clears the "corrupted" flag (static volumes only). Returns
* zero in case of success and a negative error code in case of failure.
*/
static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
long long bytes)
{
int err;
struct ubi_vtbl_record vtbl_rec;
dbg_gen("clear update marker for volume %d", vol->vol_id);
vtbl_rec = ubi->vtbl[vol->vol_id];
ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
vtbl_rec.upd_marker = 0;
if (vol->vol_type == UBI_STATIC_VOLUME) {
vol->corrupted = 0;
vol->used_bytes = bytes;
vol->used_ebs = div_u64_rem(bytes, vol->usable_leb_size,
&vol->last_eb_bytes);
if (vol->last_eb_bytes)
vol->used_ebs += 1;
else
vol->last_eb_bytes = vol->usable_leb_size;
}
mutex_lock(&ubi->device_mutex);
err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
vol->upd_marker = 0;
mutex_unlock(&ubi->device_mutex);
return err;
}
/**
* ubi_start_update - start volume update.
* @ubi: UBI device description object
* @vol: volume description object
* @bytes: update bytes
*
* This function starts volume update operation. If @bytes is zero, the volume
* is just wiped out. Returns zero in case of success and a negative error code
* in case of failure.
*/
int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
long long bytes)
{
int i, err;
dbg_gen("start update of volume %d, %llu bytes", vol->vol_id, bytes);
ubi_assert(!vol->updating && !vol->changing_leb);
vol->updating = 1;
vol->upd_buf = vmalloc(ubi->leb_size);
if (!vol->upd_buf)
return -ENOMEM;
err = set_update_marker(ubi, vol);
if (err)
return err;
/* Before updating - wipe out the volume */
for (i = 0; i < vol->reserved_pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol, i);
if (err)
return err;
}
err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
if (err)
return err;
if (bytes == 0) {
err = clear_update_marker(ubi, vol, 0);
if (err)
return err;
vfree(vol->upd_buf);
vol->updating = 0;
return 0;
}
vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
vol->usable_leb_size);
vol->upd_bytes = bytes;
vol->upd_received = 0;
return 0;
}
/**
* ubi_start_leb_change - start atomic LEB change.
* @ubi: UBI device description object
* @vol: volume description object
* @req: operation request
*
* This function starts atomic LEB change operation. Returns zero in case of
* success and a negative error code in case of failure.
*/
int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
const struct ubi_leb_change_req *req)
{
ubi_assert(!vol->updating && !vol->changing_leb);
dbg_gen("start changing LEB %d:%d, %u bytes",
vol->vol_id, req->lnum, req->bytes);
if (req->bytes == 0)
return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0);
vol->upd_bytes = req->bytes;
vol->upd_received = 0;
vol->changing_leb = 1;
vol->ch_lnum = req->lnum;
vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
if (!vol->upd_buf)
return -ENOMEM;
return 0;
}
/**
* write_leb - write update data.
* @ubi: UBI device description object
* @vol: volume description object
* @lnum: logical eraseblock number
* @buf: data to write
* @len: data size
* @used_ebs: how many logical eraseblocks will this volume contain (static
* volumes only)
*
* This function writes update data to corresponding logical eraseblock. In
* case of dynamic volume, this function checks if the data contains 0xFF bytes
* at the end. If yes, the 0xFF bytes are cut and not written. So if the whole
* buffer contains only 0xFF bytes, the LEB is left unmapped.
*
* The reason why we skip the trailing 0xFF bytes in case of dynamic volume is
* that we want to make sure that more data may be appended to the logical
* eraseblock in future. Indeed, writing 0xFF bytes may have side effects and
* this PEB won't be writable anymore. So if one writes the file-system image
* to the UBI volume where 0xFFs mean free space - UBI makes sure this free
* space is writable after the update.
*
* We do not do this for static volumes because they are read-only. It also
* could not be done anyway, because static volumes store a per-LEB CRC and
* the exact data length, so trailing bytes cannot simply be dropped.
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
void *buf, int len, int used_ebs)
{
int err;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
int l = ALIGN(len, ubi->min_io_size);
memset(buf + len, 0xFF, l - len);
len = ubi_calc_data_len(ubi, buf, l);
if (len == 0) {
dbg_gen("all %d bytes contain 0xFF - skip", len);
return 0;
}
err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len);
} else {
/*
* When writing static volume, and this is the last logical
* eraseblock, the length (@len) does not have to be aligned to
* the minimal flash I/O unit. The 'ubi_eba_write_leb_st()'
* function accepts exact (unaligned) length and stores it in
* the VID header. And it takes care of proper alignment by
* padding the buffer. Here we just make sure the padding will
* contain zeros, not random trash.
*/
memset(buf + len, 0, vol->usable_leb_size - len);
err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, used_ebs);
}
return err;
}
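/*
 * Sketch (illustrative): the trailing-0xFF trimming described above means
 * a buffer that is entirely 0xFF produces a data length of zero, so
 * write_leb() leaves the LEB unmapped and it remains writable later.
 */
#if 0
static void example_trailing_ff(struct ubi_device *ubi, void *buf, int l)
{
	int len;

	memset(buf, 0xFF, l);
	len = ubi_calc_data_len(ubi, buf, l);	/* len == 0 here */
	(void)len;				/* nothing would be written */
}
#endif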
/**
* ubi_more_update_data - write more update data.
* @ubi: UBI device description object
* @vol: volume description object
* @buf: write data (user-space memory buffer)
* @count: how many bytes to write
*
* This function writes more data to the volume which is being updated. It may
* be called an arbitrary number of times until all the update data arrives. This
* function returns %0 in case of success, number of bytes written during the
* last call if the whole volume update has been successfully finished, and a
* negative error code in case of failure.
*/
int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
const void __user *buf, int count)
{
int lnum, offs, err = 0, len, to_write = count;
dbg_gen("write %d of %lld bytes, %lld already passed",
count, vol->upd_bytes, vol->upd_received);
if (ubi->ro_mode)
return -EROFS;
lnum = div_u64_rem(vol->upd_received, vol->usable_leb_size, &offs);
if (vol->upd_received + count > vol->upd_bytes)
to_write = count = vol->upd_bytes - vol->upd_received;
/*
* When updating volumes, we accumulate a whole logical eraseblock of
* data and write it at once.
*/
if (offs != 0) {
/*
* This is a write to the middle of the logical eraseblock. We
* copy the data to our update buffer and wait for more data or
* flush it if the whole eraseblock is written or the update
* is finished.
*/
len = vol->usable_leb_size - offs;
if (len > count)
len = count;
err = copy_from_user(vol->upd_buf + offs, buf, len);
if (err)
return -EFAULT;
if (offs + len == vol->usable_leb_size ||
vol->upd_received + len == vol->upd_bytes) {
int flush_len = offs + len;
/*
* OK, we gathered either the whole eraseblock or this
* is the last chunk, it's time to flush the buffer.
*/
ubi_assert(flush_len <= vol->usable_leb_size);
err = write_leb(ubi, vol, lnum, vol->upd_buf, flush_len,
vol->upd_ebs);
if (err)
return err;
}
vol->upd_received += len;
count -= len;
buf += len;
lnum += 1;
}
/*
* If we've got more to write, let's continue. At this point we know we
* are starting from the beginning of an eraseblock.
*/
while (count) {
if (count > vol->usable_leb_size)
len = vol->usable_leb_size;
else
len = count;
err = copy_from_user(vol->upd_buf, buf, len);
if (err)
return -EFAULT;
if (len == vol->usable_leb_size ||
vol->upd_received + len == vol->upd_bytes) {
err = write_leb(ubi, vol, lnum, vol->upd_buf,
len, vol->upd_ebs);
if (err)
break;
}
vol->upd_received += len;
count -= len;
lnum += 1;
buf += len;
}
ubi_assert(vol->upd_received <= vol->upd_bytes);
if (vol->upd_received == vol->upd_bytes) {
err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
if (err)
return err;
/* The update is finished, clear the update marker */
err = clear_update_marker(ubi, vol, vol->upd_bytes);
if (err)
return err;
vol->updating = 0;
err = to_write;
vfree(vol->upd_buf);
}
return err;
}
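/*
 * Userspace-side sketch (illustrative; assumes the standard UBI_IOCVOLUP
 * ioctl on the volume character device): the updater announces the total
 * size once, then streams the image, and each write() lands in
 * ubi_more_update_data() above:
 *
 *	long long bytes = image_size;
 *	ioctl(vol_fd, UBI_IOCVOLUP, &bytes);
 *	while ((n = read(img_fd, buf, sizeof(buf))) > 0)
 *		write(vol_fd, buf, n);
 */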
/**
* ubi_more_leb_change_data - accept more data for atomic LEB change.
* @ubi: UBI device description object
* @vol: volume description object
* @buf: write data (user-space memory buffer)
* @count: how many bytes to write
*
* This function accepts more data for the volume which is under the
* "atomic LEB change" operation. It may be called an arbitrary number of times
* until all data arrives. This function returns %0 in case of success, number
* of bytes written during the last call if the whole "atomic LEB change"
* operation has been successfully finished, and a negative error code in case
* of failure.
*/
int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
const void __user *buf, int count)
{
int err;
dbg_gen("write %d of %lld bytes, %lld already passed",
count, vol->upd_bytes, vol->upd_received);
if (ubi->ro_mode)
return -EROFS;
if (vol->upd_received + count > vol->upd_bytes)
count = vol->upd_bytes - vol->upd_received;
err = copy_from_user(vol->upd_buf + vol->upd_received, buf, count);
if (err)
return -EFAULT;
vol->upd_received += count;
if (vol->upd_received == vol->upd_bytes) {
int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size);
memset(vol->upd_buf + vol->upd_bytes, 0xFF,
len - vol->upd_bytes);
len = ubi_calc_data_len(ubi, vol->upd_buf, len);
err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
vol->upd_buf, len);
if (err)
return err;
}
ubi_assert(vol->upd_received <= vol->upd_bytes);
if (vol->upd_received == vol->upd_bytes) {
vol->changing_leb = 0;
err = count;
vfree(vol->upd_buf);
}
return err;
}
| linux-master | drivers/mtd/ubi/upd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) International Business Machines Corp., 2006
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
#include "ubi.h"
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/seq_file.h>
/**
* ubi_dump_flash - dump a region of flash.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to dump
* @offset: the starting offset within the physical eraseblock to dump
* @len: the length of the region to dump
*/
void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
{
int err;
size_t read;
void *buf;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
buf = vmalloc(len);
if (!buf)
return;
err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err && err != -EUCLEAN) {
ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
err, len, pnum, offset, read);
goto out;
}
ubi_msg(ubi, "dumping %d bytes of data from PEB %d, offset %d",
len, pnum, offset);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
out:
vfree(buf);
return;
}
/**
* ubi_dump_ec_hdr - dump an erase counter header.
* @ec_hdr: the erase counter header to dump
*/
void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
{
pr_err("Erase counter header dump:\n");
pr_err("\tmagic %#08x\n", be32_to_cpu(ec_hdr->magic));
pr_err("\tversion %d\n", (int)ec_hdr->version);
pr_err("\tec %llu\n", (long long)be64_to_cpu(ec_hdr->ec));
pr_err("\tvid_hdr_offset %d\n", be32_to_cpu(ec_hdr->vid_hdr_offset));
pr_err("\tdata_offset %d\n", be32_to_cpu(ec_hdr->data_offset));
pr_err("\timage_seq %d\n", be32_to_cpu(ec_hdr->image_seq));
pr_err("\thdr_crc %#08x\n", be32_to_cpu(ec_hdr->hdr_crc));
pr_err("erase counter header hexdump:\n");
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
ec_hdr, UBI_EC_HDR_SIZE, 1);
}
/**
* ubi_dump_vid_hdr - dump a volume identifier header.
* @vid_hdr: the volume identifier header to dump
*/
void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
{
pr_err("Volume identifier header dump:\n");
pr_err("\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
pr_err("\tversion %d\n", (int)vid_hdr->version);
pr_err("\tvol_type %d\n", (int)vid_hdr->vol_type);
pr_err("\tcopy_flag %d\n", (int)vid_hdr->copy_flag);
pr_err("\tcompat %d\n", (int)vid_hdr->compat);
pr_err("\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id));
pr_err("\tlnum %d\n", be32_to_cpu(vid_hdr->lnum));
pr_err("\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size));
pr_err("\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs));
pr_err("\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad));
pr_err("\tsqnum %llu\n",
(unsigned long long)be64_to_cpu(vid_hdr->sqnum));
pr_err("\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
pr_err("Volume identifier header hexdump:\n");
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
vid_hdr, UBI_VID_HDR_SIZE, 1);
}
/**
* ubi_dump_vol_info - dump volume information.
* @vol: UBI volume description object
*/
void ubi_dump_vol_info(const struct ubi_volume *vol)
{
pr_err("Volume information dump:\n");
pr_err("\tvol_id %d\n", vol->vol_id);
pr_err("\treserved_pebs %d\n", vol->reserved_pebs);
pr_err("\talignment %d\n", vol->alignment);
pr_err("\tdata_pad %d\n", vol->data_pad);
pr_err("\tvol_type %d\n", vol->vol_type);
pr_err("\tname_len %d\n", vol->name_len);
pr_err("\tusable_leb_size %d\n", vol->usable_leb_size);
pr_err("\tused_ebs %d\n", vol->used_ebs);
pr_err("\tused_bytes %lld\n", vol->used_bytes);
pr_err("\tlast_eb_bytes %d\n", vol->last_eb_bytes);
pr_err("\tcorrupted %d\n", vol->corrupted);
pr_err("\tupd_marker %d\n", vol->upd_marker);
pr_err("\tskip_check %d\n", vol->skip_check);
if (vol->name_len <= UBI_VOL_NAME_MAX &&
strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
pr_err("\tname %s\n", vol->name);
} else {
pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
vol->name[0], vol->name[1], vol->name[2],
vol->name[3], vol->name[4]);
}
}
/**
* ubi_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
* @r: the object to dump
* @idx: volume table index
*/
void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
{
int name_len = be16_to_cpu(r->name_len);
pr_err("Volume table record %d dump:\n", idx);
pr_err("\treserved_pebs %d\n", be32_to_cpu(r->reserved_pebs));
pr_err("\talignment %d\n", be32_to_cpu(r->alignment));
pr_err("\tdata_pad %d\n", be32_to_cpu(r->data_pad));
pr_err("\tvol_type %d\n", (int)r->vol_type);
pr_err("\tupd_marker %d\n", (int)r->upd_marker);
pr_err("\tname_len %d\n", name_len);
if (r->name[0] == '\0') {
pr_err("\tname NULL\n");
return;
}
if (name_len <= UBI_VOL_NAME_MAX &&
strnlen(&r->name[0], name_len + 1) == name_len) {
pr_err("\tname %s\n", &r->name[0]);
} else {
pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
r->name[0], r->name[1], r->name[2], r->name[3],
r->name[4]);
}
pr_err("\tcrc %#08x\n", be32_to_cpu(r->crc));
}
/**
* ubi_dump_av - dump a &struct ubi_ainf_volume object.
* @av: the object to dump
*/
void ubi_dump_av(const struct ubi_ainf_volume *av)
{
pr_err("Volume attaching information dump:\n");
pr_err("\tvol_id %d\n", av->vol_id);
pr_err("\thighest_lnum %d\n", av->highest_lnum);
pr_err("\tleb_count %d\n", av->leb_count);
pr_err("\tcompat %d\n", av->compat);
pr_err("\tvol_type %d\n", av->vol_type);
pr_err("\tused_ebs %d\n", av->used_ebs);
pr_err("\tlast_data_size %d\n", av->last_data_size);
pr_err("\tdata_pad %d\n", av->data_pad);
}
/**
* ubi_dump_aeb - dump a &struct ubi_ainf_peb object.
* @aeb: the object to dump
* @type: object type: 0 - not corrupted, 1 - corrupted
*/
void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type)
{
pr_err("eraseblock attaching information dump:\n");
pr_err("\tec %d\n", aeb->ec);
pr_err("\tpnum %d\n", aeb->pnum);
if (type == 0) {
pr_err("\tlnum %d\n", aeb->lnum);
pr_err("\tscrub %d\n", aeb->scrub);
pr_err("\tsqnum %llu\n", aeb->sqnum);
}
}
/**
* ubi_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
* @req: the object to dump
*/
void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
{
char nm[17];
pr_err("Volume creation request dump:\n");
pr_err("\tvol_id %d\n", req->vol_id);
pr_err("\talignment %d\n", req->alignment);
pr_err("\tbytes %lld\n", (long long)req->bytes);
pr_err("\tvol_type %d\n", req->vol_type);
pr_err("\tname_len %d\n", req->name_len);
memcpy(nm, req->name, 16);
nm[16] = 0;
pr_err("\t1st 16 characters of name: %s\n", nm);
}
/*
* Root directory for UBI stuff in debugfs. Contains sub-directories which
* contain files specific to particular UBI devices.
*/
static struct dentry *dfs_rootdir;
/**
* ubi_debugfs_init - create UBI debugfs directory.
*
* Create UBI debugfs directory. Returns zero in case of success and a negative
* error code in case of failure.
*/
int ubi_debugfs_init(void)
{
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
dfs_rootdir = debugfs_create_dir("ubi", NULL);
if (IS_ERR_OR_NULL(dfs_rootdir)) {
int err = dfs_rootdir ? PTR_ERR(dfs_rootdir) : -ENODEV;
pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n",
err);
return err;
}
return 0;
}
/**
* ubi_debugfs_exit - remove UBI debugfs directory.
*/
void ubi_debugfs_exit(void)
{
if (IS_ENABLED(CONFIG_DEBUG_FS))
debugfs_remove(dfs_rootdir);
}
/* Read an UBI debugfs file */
static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
unsigned long ubi_num = (unsigned long)file->private_data;
struct dentry *dent = file->f_path.dentry;
struct ubi_device *ubi;
struct ubi_debug_info *d;
char buf[8];
int val;
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
d = &ubi->dbg;
if (dent == d->dfs_chk_gen)
val = d->chk_gen;
else if (dent == d->dfs_chk_io)
val = d->chk_io;
else if (dent == d->dfs_chk_fastmap)
val = d->chk_fastmap;
else if (dent == d->dfs_disable_bgt)
val = d->disable_bgt;
else if (dent == d->dfs_emulate_bitflips)
val = d->emulate_bitflips;
else if (dent == d->dfs_emulate_io_failures)
val = d->emulate_io_failures;
else if (dent == d->dfs_emulate_power_cut) {
snprintf(buf, sizeof(buf), "%u\n", d->emulate_power_cut);
count = simple_read_from_buffer(user_buf, count, ppos,
buf, strlen(buf));
goto out;
} else if (dent == d->dfs_power_cut_min) {
snprintf(buf, sizeof(buf), "%u\n", d->power_cut_min);
count = simple_read_from_buffer(user_buf, count, ppos,
buf, strlen(buf));
goto out;
} else if (dent == d->dfs_power_cut_max) {
snprintf(buf, sizeof(buf), "%u\n", d->power_cut_max);
count = simple_read_from_buffer(user_buf, count, ppos,
buf, strlen(buf));
goto out;
} else {
count = -EINVAL;
goto out;
}
if (val)
buf[0] = '1';
else
buf[0] = '0';
buf[1] = '\n';
buf[2] = 0x00;
count = simple_read_from_buffer(user_buf, count, ppos, buf, 2);
out:
ubi_put_device(ubi);
return count;
}
/* Write an UBI debugfs file */
static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
unsigned long ubi_num = (unsigned long)file->private_data;
struct dentry *dent = file->f_path.dentry;
struct ubi_device *ubi;
struct ubi_debug_info *d;
size_t buf_size;
char buf[8] = {0};
int val;
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
d = &ubi->dbg;
buf_size = min_t(size_t, count, (sizeof(buf) - 1));
if (copy_from_user(buf, user_buf, buf_size)) {
count = -EFAULT;
goto out;
}
if (dent == d->dfs_power_cut_min) {
if (kstrtouint(buf, 0, &d->power_cut_min) != 0)
count = -EINVAL;
goto out;
} else if (dent == d->dfs_power_cut_max) {
if (kstrtouint(buf, 0, &d->power_cut_max) != 0)
count = -EINVAL;
goto out;
} else if (dent == d->dfs_emulate_power_cut) {
if (kstrtoint(buf, 0, &val) != 0)
count = -EINVAL;
else
d->emulate_power_cut = val;
goto out;
}
if (buf[0] == '1')
val = 1;
else if (buf[0] == '0')
val = 0;
else {
count = -EINVAL;
goto out;
}
if (dent == d->dfs_chk_gen)
d->chk_gen = val;
else if (dent == d->dfs_chk_io)
d->chk_io = val;
else if (dent == d->dfs_chk_fastmap)
d->chk_fastmap = val;
else if (dent == d->dfs_disable_bgt)
d->disable_bgt = val;
else if (dent == d->dfs_emulate_bitflips)
d->emulate_bitflips = val;
else if (dent == d->dfs_emulate_io_failures)
d->emulate_io_failures = val;
else
count = -EINVAL;
out:
ubi_put_device(ubi);
return count;
}
/* File operations for all UBI debugfs files except
* detailed_erase_block_info
*/
static const struct file_operations dfs_fops = {
.read = dfs_file_read,
.write = dfs_file_write,
.open = simple_open,
.llseek = no_llseek,
.owner = THIS_MODULE,
};
/* As long as the position is less than the total number of erase blocks,
* we still have more to print.
*/
static void *eraseblk_count_seq_start(struct seq_file *s, loff_t *pos)
{
struct ubi_device *ubi = s->private;
if (*pos < ubi->peb_count)
return pos;
return NULL;
}
/* Since we are using the position as the iterator, we just need to check if we
* are done and increment the position.
*/
static void *eraseblk_count_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
struct ubi_device *ubi = s->private;
(*pos)++;
if (*pos < ubi->peb_count)
return pos;
return NULL;
}
static void eraseblk_count_seq_stop(struct seq_file *s, void *v)
{
}
static int eraseblk_count_seq_show(struct seq_file *s, void *iter)
{
struct ubi_device *ubi = s->private;
struct ubi_wl_entry *wl;
int *block_number = iter;
int erase_count = -1;
int err;
/* If this is the start, print a header */
if (*block_number == 0)
seq_puts(s, "physical_block_number\terase_count\n");
err = ubi_io_is_bad(ubi, *block_number);
if (err)
return err;
spin_lock(&ubi->wl_lock);
wl = ubi->lookuptbl[*block_number];
if (wl)
erase_count = wl->ec;
spin_unlock(&ubi->wl_lock);
if (erase_count < 0)
return 0;
seq_printf(s, "%-22d\t%-11d\n", *block_number, erase_count);
return 0;
}
static const struct seq_operations eraseblk_count_seq_ops = {
.start = eraseblk_count_seq_start,
.next = eraseblk_count_seq_next,
.stop = eraseblk_count_seq_stop,
.show = eraseblk_count_seq_show
};
static int eraseblk_count_open(struct inode *inode, struct file *f)
{
struct seq_file *s;
int err;
err = seq_open(f, &eraseblk_count_seq_ops);
if (err)
return err;
s = f->private_data;
s->private = ubi_get_device((unsigned long)inode->i_private);
if (!s->private)
return -ENODEV;
else
return 0;
}
static int eraseblk_count_release(struct inode *inode, struct file *f)
{
struct seq_file *s = f->private_data;
struct ubi_device *ubi = s->private;
ubi_put_device(ubi);
return seq_release(inode, f);
}
static const struct file_operations eraseblk_count_fops = {
.owner = THIS_MODULE,
.open = eraseblk_count_open,
.read = seq_read,
.llseek = seq_lseek,
.release = eraseblk_count_release,
};
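/*
 * Output sketch (illustrative; assumes debugfs is mounted at the usual
 * /sys/kernel/debug): the seq_file above produces lines of the form:
 *
 *	physical_block_number	erase_count
 *	0			14
 *	1			9
 *	...
 *
 * Bad PEBs and PEBs absent from the wear-levelling table are skipped.
 */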
/**
* ubi_debugfs_init_dev - initialize debugfs for an UBI device.
* @ubi: UBI device description object
*
* This function creates all debugfs files for UBI device @ubi. Returns zero in
* case of success and a negative error code in case of failure.
*/
int ubi_debugfs_init_dev(struct ubi_device *ubi)
{
unsigned long ubi_num = ubi->ubi_num;
struct ubi_debug_info *d = &ubi->dbg;
umode_t mode = S_IRUSR | S_IWUSR;
int n;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
ubi->ubi_num);
if (n > UBI_DFS_DIR_LEN) {
/* The array size is too small */
return -EINVAL;
}
d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir);
d->dfs_chk_gen = debugfs_create_file("chk_gen", mode, d->dfs_dir,
(void *)ubi_num, &dfs_fops);
d->dfs_chk_io = debugfs_create_file("chk_io", mode, d->dfs_dir,
(void *)ubi_num, &dfs_fops);
d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", mode,
d->dfs_dir, (void *)ubi_num,
&dfs_fops);
d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", mode,
d->dfs_dir, (void *)ubi_num,
&dfs_fops);
d->dfs_emulate_bitflips = debugfs_create_file("tst_emulate_bitflips",
mode, d->dfs_dir,
(void *)ubi_num,
&dfs_fops);
d->dfs_emulate_io_failures = debugfs_create_file("tst_emulate_io_failures",
mode, d->dfs_dir,
(void *)ubi_num,
&dfs_fops);
d->dfs_emulate_power_cut = debugfs_create_file("tst_emulate_power_cut",
mode, d->dfs_dir,
(void *)ubi_num,
&dfs_fops);
d->dfs_power_cut_min = debugfs_create_file("tst_emulate_power_cut_min",
mode, d->dfs_dir,
(void *)ubi_num, &dfs_fops);
d->dfs_power_cut_max = debugfs_create_file("tst_emulate_power_cut_max",
mode, d->dfs_dir,
(void *)ubi_num, &dfs_fops);
debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir,
(void *)ubi_num, &eraseblk_count_fops);
return 0;
}
/**
* ubi_debugfs_exit_dev - free all debugfs files corresponding to device @ubi
* @ubi: UBI device description object
*/
void ubi_debugfs_exit_dev(struct ubi_device *ubi)
{
if (IS_ENABLED(CONFIG_DEBUG_FS))
debugfs_remove_recursive(ubi->dbg.dfs_dir);
}
/**
* ubi_dbg_power_cut - emulate a power cut if it is time to do so
* @ubi: UBI device description object
* @caller: Flags set to indicate from where the function is being called
*
* Returns non-zero if a power cut was emulated, zero if not.
*/
int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
{
unsigned int range;
if ((ubi->dbg.emulate_power_cut & caller) == 0)
return 0;
if (ubi->dbg.power_cut_counter == 0) {
ubi->dbg.power_cut_counter = ubi->dbg.power_cut_min;
if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) {
range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min;
ubi->dbg.power_cut_counter += get_random_u32_below(range);
}
return 0;
}
ubi->dbg.power_cut_counter--;
if (ubi->dbg.power_cut_counter)
return 0;
ubi_msg(ubi, "XXXXXXXXXXXXXXX emulating a power cut XXXXXXXXXXXXXXXX");
ubi_ro_mode(ubi);
return 1;
}
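/*
 * Usage sketch (illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug, and the value 3 is an assumption that the caller
 * flags in ubi.h make it mean "EC and VID header writes"): to emulate a
 * power cut somewhere between the 10th and 30th header write on ubi0:
 *
 *	echo 10 > /sys/kernel/debug/ubi/ubi0/tst_emulate_power_cut_min
 *	echo 30 > /sys/kernel/debug/ubi/ubi0/tst_emulate_power_cut_max
 *	echo 3  > /sys/kernel/debug/ubi/ubi0/tst_emulate_power_cut
 *
 * Once the countdown expires, the device is switched to read-only mode,
 * as the call to ubi_ro_mode() above shows.
 */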
| linux-master | drivers/mtd/ubi/debug.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) International Business Machines Corp., 2006
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/* This file mostly implements UBI kernel API functions */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <asm/div64.h>
#include "ubi.h"
/**
* ubi_do_get_device_info - get information about UBI device.
* @ubi: UBI device description object
* @di: the information is stored here
*
* This function is the same as 'ubi_get_device_info()', but it assumes the UBI
* device is locked and cannot disappear.
*/
void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di)
{
di->ubi_num = ubi->ubi_num;
di->leb_size = ubi->leb_size;
di->leb_start = ubi->leb_start;
di->min_io_size = ubi->min_io_size;
di->max_write_size = ubi->max_write_size;
di->ro_mode = ubi->ro_mode;
di->cdev = ubi->cdev.dev;
}
EXPORT_SYMBOL_GPL(ubi_do_get_device_info);
/**
* ubi_get_device_info - get information about UBI device.
* @ubi_num: UBI device number
* @di: the information is stored here
*
* This function returns %0 in case of success, %-EINVAL if the UBI device
* number is invalid, and %-ENODEV if there is no such UBI device.
*/
int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
{
struct ubi_device *ubi;
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
return -EINVAL;
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
ubi_do_get_device_info(ubi, di);
ubi_put_device(ubi);
return 0;
}
EXPORT_SYMBOL_GPL(ubi_get_device_info);
/**
* ubi_do_get_volume_info - get information about UBI volume.
* @ubi: UBI device description object
* @vol: volume description object
* @vi: the information is stored here
*/
void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
struct ubi_volume_info *vi)
{
vi->vol_id = vol->vol_id;
vi->ubi_num = ubi->ubi_num;
vi->size = vol->reserved_pebs;
vi->used_bytes = vol->used_bytes;
vi->vol_type = vol->vol_type;
vi->corrupted = vol->corrupted;
vi->upd_marker = vol->upd_marker;
vi->alignment = vol->alignment;
vi->usable_leb_size = vol->usable_leb_size;
vi->name_len = vol->name_len;
vi->name = vol->name;
vi->cdev = vol->cdev.dev;
vi->dev = &vol->dev;
}
/**
* ubi_get_volume_info - get information about UBI volume.
* @desc: volume descriptor
* @vi: the information is stored here
*/
void ubi_get_volume_info(struct ubi_volume_desc *desc,
struct ubi_volume_info *vi)
{
ubi_do_get_volume_info(desc->vol->ubi, desc->vol, vi);
}
EXPORT_SYMBOL_GPL(ubi_get_volume_info);
/**
* ubi_open_volume - open UBI volume.
* @ubi_num: UBI device number
* @vol_id: volume ID
* @mode: open mode
*
* The @mode parameter specifies if the volume should be opened in read-only
* mode, read-write mode, or exclusive mode. The exclusive mode guarantees that
* nobody else will be able to open this volume. UBI allows many volume
* readers and one writer at a time.
*
* If a static volume is being opened for the first time since boot, it will be
* checked by this function, which means it will be fully read and the CRC
* checksum of each logical eraseblock will be checked.
*
* This function returns volume descriptor in case of success and a negative
* error code in case of failure.
*/
struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
{
int err;
struct ubi_volume_desc *desc;
struct ubi_device *ubi;
struct ubi_volume *vol;
dbg_gen("open device %d, volume %d, mode %d", ubi_num, vol_id, mode);
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
return ERR_PTR(-EINVAL);
if (mode != UBI_READONLY && mode != UBI_READWRITE &&
mode != UBI_EXCLUSIVE && mode != UBI_METAONLY)
return ERR_PTR(-EINVAL);
/*
* First of all, we have to get the UBI device to prevent its removal.
*/
ubi = ubi_get_device(ubi_num);
if (!ubi)
return ERR_PTR(-ENODEV);
if (vol_id < 0 || vol_id >= ubi->vtbl_slots) {
err = -EINVAL;
goto out_put_ubi;
}
desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL);
if (!desc) {
err = -ENOMEM;
goto out_put_ubi;
}
err = -ENODEV;
if (!try_module_get(THIS_MODULE))
goto out_free;
spin_lock(&ubi->volumes_lock);
vol = ubi->volumes[vol_id];
if (!vol)
goto out_unlock;
err = -EBUSY;
switch (mode) {
case UBI_READONLY:
if (vol->exclusive)
goto out_unlock;
vol->readers += 1;
break;
case UBI_READWRITE:
if (vol->exclusive || vol->writers > 0)
goto out_unlock;
vol->writers += 1;
break;
case UBI_EXCLUSIVE:
if (vol->exclusive || vol->writers || vol->readers ||
vol->metaonly)
goto out_unlock;
vol->exclusive = 1;
break;
case UBI_METAONLY:
if (vol->metaonly || vol->exclusive)
goto out_unlock;
vol->metaonly = 1;
break;
}
get_device(&vol->dev);
vol->ref_count += 1;
spin_unlock(&ubi->volumes_lock);
desc->vol = vol;
desc->mode = mode;
mutex_lock(&ubi->ckvol_mutex);
if (!vol->checked && !vol->skip_check) {
/* This is the first open - check the volume */
err = ubi_check_volume(ubi, vol_id);
if (err < 0) {
mutex_unlock(&ubi->ckvol_mutex);
ubi_close_volume(desc);
return ERR_PTR(err);
}
if (err == 1) {
ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
vol_id, ubi->ubi_num);
vol->corrupted = 1;
}
vol->checked = 1;
}
mutex_unlock(&ubi->ckvol_mutex);
return desc;
out_unlock:
spin_unlock(&ubi->volumes_lock);
module_put(THIS_MODULE);
out_free:
kfree(desc);
out_put_ubi:
ubi_err(ubi, "cannot open device %d, volume %d, error %d",
ubi_num, vol_id, err);
ubi_put_device(ubi);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ubi_open_volume);
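/*
 * Illustrative sketch (not built): how a client module might open volume 0
 * on ubi0 read-only, query its geometry, and close it again. The function
 * name is hypothetical; only the ubi_* calls are real API.
 */
#if 0
static int example_open_close(void)
{
struct ubi_volume_desc *desc;
struct ubi_volume_info vi;
desc = ubi_open_volume(0, 0, UBI_READONLY);
if (IS_ERR(desc))
return PTR_ERR(desc);
ubi_get_volume_info(desc, &vi);
pr_info("volume \"%s\": %d LEBs of %d bytes\n",
vi.name, vi.size, vi.usable_leb_size);
ubi_close_volume(desc);
return 0;
}
#endif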
/**
* ubi_open_volume_nm - open UBI volume by name.
* @ubi_num: UBI device number
* @name: volume name
* @mode: open mode
*
* This function is similar to 'ubi_open_volume()', but opens a volume by name.
*/
struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
int mode)
{
int i, vol_id = -1, len;
struct ubi_device *ubi;
struct ubi_volume_desc *ret;
dbg_gen("open device %d, volume %s, mode %d", ubi_num, name, mode);
if (!name)
return ERR_PTR(-EINVAL);
len = strnlen(name, UBI_VOL_NAME_MAX + 1);
if (len > UBI_VOL_NAME_MAX)
return ERR_PTR(-EINVAL);
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
return ERR_PTR(-EINVAL);
ubi = ubi_get_device(ubi_num);
if (!ubi)
return ERR_PTR(-ENODEV);
spin_lock(&ubi->volumes_lock);
/* Walk all volumes of this UBI device */
for (i = 0; i < ubi->vtbl_slots; i++) {
struct ubi_volume *vol = ubi->volumes[i];
if (vol && len == vol->name_len && !strcmp(name, vol->name)) {
vol_id = i;
break;
}
}
spin_unlock(&ubi->volumes_lock);
if (vol_id >= 0)
ret = ubi_open_volume(ubi_num, vol_id, mode);
else
ret = ERR_PTR(-ENODEV);
/*
* We should put the UBI device even in case of success, because
* 'ubi_open_volume()' took a reference as well.
*/
ubi_put_device(ubi);
return ret;
}
EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
/**
* ubi_open_volume_path - open UBI volume by its character device node path.
* @pathname: volume character device node path
* @mode: open mode
*
* This function is similar to 'ubi_open_volume()', but opens a volume by the
* path to its character device node.
*/
struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
{
int error, ubi_num, vol_id;
struct path path;
struct kstat stat;
dbg_gen("open volume %s, mode %d", pathname, mode);
if (!pathname || !*pathname)
return ERR_PTR(-EINVAL);
error = kern_path(pathname, LOOKUP_FOLLOW, &path);
if (error)
return ERR_PTR(error);
error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
path_put(&path);
if (error)
return ERR_PTR(error);
if (!S_ISCHR(stat.mode))
return ERR_PTR(-EINVAL);
ubi_num = ubi_major2num(MAJOR(stat.rdev));
vol_id = MINOR(stat.rdev) - 1;
if (vol_id >= 0 && ubi_num >= 0)
return ubi_open_volume(ubi_num, vol_id, mode);
return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(ubi_open_volume_path);
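/*
 * Illustrative example: opening a volume through the device node udev
 * typically creates for ubi0, volume 1:
 *
 * desc = ubi_open_volume_path("/dev/ubi0_1", UBI_READWRITE);
 *
 * Volume character device minors start at 1, so the minor number of
 * /dev/ubi0_1 is 2 and vol_id = 2 - 1 = 1.
 */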
/**
* ubi_close_volume - close UBI volume.
* @desc: volume descriptor
*/
void ubi_close_volume(struct ubi_volume_desc *desc)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
dbg_gen("close device %d, volume %d, mode %d",
ubi->ubi_num, vol->vol_id, desc->mode);
spin_lock(&ubi->volumes_lock);
switch (desc->mode) {
case UBI_READONLY:
vol->readers -= 1;
break;
case UBI_READWRITE:
vol->writers -= 1;
break;
case UBI_EXCLUSIVE:
vol->exclusive = 0;
break;
case UBI_METAONLY:
vol->metaonly = 0;
break;
}
vol->ref_count -= 1;
spin_unlock(&ubi->volumes_lock);
kfree(desc);
put_device(&vol->dev);
ubi_put_device(ubi);
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(ubi_close_volume);
/**
* leb_read_sanity_check - does sanity checks on read requests.
* @desc: volume descriptor
* @lnum: logical eraseblock number to read from
* @offset: offset within the logical eraseblock to read from
* @len: how many bytes to read
*
* This function is used by ubi_leb_read() and ubi_leb_read_sg()
* to perform sanity checks.
*/
static int leb_read_sanity_check(struct ubi_volume_desc *desc, int lnum,
int offset, int len)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int vol_id = vol->vol_id;
if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
lnum >= vol->used_ebs || offset < 0 || len < 0 ||
offset + len > vol->usable_leb_size)
return -EINVAL;
if (vol->vol_type == UBI_STATIC_VOLUME) {
if (vol->used_ebs == 0)
/* Empty static UBI volume */
return 0;
if (lnum == vol->used_ebs - 1 &&
offset + len > vol->last_eb_bytes)
return -EINVAL;
}
if (vol->upd_marker)
return -EBADF;
return 0;
}
/**
* ubi_leb_read - read data.
* @desc: volume descriptor
* @lnum: logical eraseblock number to read from
* @buf: buffer where to store the read data
* @offset: offset within the logical eraseblock to read from
* @len: how many bytes to read
* @check: whether UBI has to check the read data's CRC or not.
*
* This function reads data from offset @offset of logical eraseblock @lnum and
* stores the data at @buf. When reading from static volumes, @check specifies
* whether the data has to be checked or not. If yes, the whole logical
* eraseblock will be read and its CRC checksum will be checked (i.e., the CRC
* checksum is per-eraseblock). So checking may substantially slow down the
* read speed. The @check argument is ignored for dynamic volumes.
*
* In case of success, this function returns zero. In case of failure, this
* function returns a negative error code.
*
* %-EBADMSG error code is returned:
* o for both static and dynamic volumes if MTD driver has detected a data
* integrity problem (unrecoverable ECC checksum mismatch in case of NAND);
* o for static volumes in case of data CRC mismatch.
*
* If the volume is damaged because of an interrupted update this function just
* returns immediately with %-EBADF error code.
*/
int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
int len, int check)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int err, vol_id = vol->vol_id;
dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
err = leb_read_sanity_check(desc, lnum, offset, len);
if (err < 0)
return err;
if (len == 0)
return 0;
err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
vol->corrupted = 1;
}
return err;
}
EXPORT_SYMBOL_GPL(ubi_leb_read);
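/*
 * Illustrative sketch (not built): reading the first 128 bytes of LEB 0.
 * @desc is assumed to come from ubi_open_volume(). For a static volume,
 * passing check = 1 makes UBI verify the whole LEB's CRC before the
 * requested bytes are returned.
 */
#if 0
static int example_read(struct ubi_volume_desc *desc)
{
char buf[128];
return ubi_leb_read(desc, 0, buf, 0, sizeof(buf), 1);
}
#endif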
/**
* ubi_leb_read_sg - read data into a scatter gather list.
* @desc: volume descriptor
* @lnum: logical eraseblock number to read from
* @sgl: UBI scatter gather list to store the read data
* @offset: offset within the logical eraseblock to read from
* @len: how many bytes to read
* @check: whether UBI has to check the read data's CRC or not.
*
* This function works exactly like ubi_leb_read(), but instead of storing
* the read data into a buffer it writes it to an UBI scatter gather
* list.
*/
int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl,
int offset, int len, int check)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int err, vol_id = vol->vol_id;
dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
err = leb_read_sanity_check(desc, lnum, offset, len);
if (err < 0)
return err;
if (len == 0)
return 0;
err = ubi_eba_read_leb_sg(ubi, vol, sgl, lnum, offset, len, check);
if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
vol->corrupted = 1;
}
return err;
}
EXPORT_SYMBOL_GPL(ubi_leb_read_sg);
/**
* ubi_leb_write - write data.
* @desc: volume descriptor
* @lnum: logical eraseblock number to write to
* @buf: data to write
* @offset: offset within the logical eraseblock where to write
* @len: how many bytes to write
*
* This function writes @len bytes of data from @buf to offset @offset of
* logical eraseblock @lnum.
*
* This function takes care of physical eraseblock write failures. If the
* write to the physical eraseblock fails, the logical eraseblock is
* re-mapped to another physical eraseblock, the data is recovered, and the
* write finishes. UBI has a pool of reserved physical eraseblocks for this.
*
* If all the data were successfully written, zero is returned. If an error
* occurred and UBI has not been able to recover from it, this function returns
* a negative error code. Note, in case of an error, it is possible that
* something was still written to the flash media, but that may be some
* garbage.
*
* If the volume is damaged because of an interrupted update this function just
* returns immediately with %-EBADF code.
*/
int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
int offset, int len)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int vol_id = vol->vol_id;
dbg_gen("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset);
if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
return -EINVAL;
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
if (!ubi_leb_valid(vol, lnum) || offset < 0 || len < 0 ||
offset + len > vol->usable_leb_size ||
offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
return -EINVAL;
if (vol->upd_marker)
return -EBADF;
if (len == 0)
return 0;
return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len);
}
EXPORT_SYMBOL_GPL(ubi_leb_write);
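/*
 * Illustrative sketch (not built): writing one minimal I/O unit to LEB 3.
 * Both @offset and @len must be multiples of the device's min_io_size,
 * otherwise ubi_leb_write() fails with -EINVAL; offset 0 and a single
 * minimal I/O unit trivially satisfy that rule. @data and @min_io_size
 * are assumed to be supplied by the caller.
 */
#if 0
static int example_write(struct ubi_volume_desc *desc, const void *data,
int min_io_size)
{
return ubi_leb_write(desc, 3, data, 0, min_io_size);
}
#endif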
/**
* ubi_leb_change - change logical eraseblock atomically.
* @desc: volume descriptor
* @lnum: logical eraseblock number to change
* @buf: data to write
* @len: how many bytes to write
*
* This function changes the contents of a logical eraseblock atomically. @buf
* has to contain the new logical eraseblock data, and @len the length of the
* data, which has to be aligned. The length may be shorter than the logical
* eraseblock size, and the logical eraseblock may be appended to more times
* later on. This function guarantees that in case of an unclean reboot the old
* contents are preserved. Returns zero in case of success and a negative error
* code in case of failure.
*/
int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
int len)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int vol_id = vol->vol_id;
dbg_gen("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum);
if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
return -EINVAL;
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
if (!ubi_leb_valid(vol, lnum) || len < 0 ||
len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
return -EINVAL;
if (vol->upd_marker)
return -EBADF;
if (len == 0)
return 0;
return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
}
EXPORT_SYMBOL_GPL(ubi_leb_change);
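/*
 * Illustrative sketch (not built): atomically replacing the contents of
 * LEB 0, e.g. for a superblock-like structure which must never be seen
 * half-written. @len has the same min_io_size alignment requirement as
 * ubi_leb_write().
 */
#if 0
static int example_atomic_update(struct ubi_volume_desc *desc,
const void *sb, int aligned_len)
{
return ubi_leb_change(desc, 0, sb, aligned_len);
}
#endif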
/**
* ubi_leb_erase - erase logical eraseblock.
* @desc: volume descriptor
* @lnum: logical eraseblock number
*
* This function un-maps logical eraseblock @lnum and synchronously erases the
* correspondent physical eraseblock. Returns zero in case of success and a
* negative error code in case of failure.
*
* If the volume is damaged because of an interrupted update this function just
* returns immediately with %-EBADF code.
*/
int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int err;
dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
if (!ubi_leb_valid(vol, lnum))
return -EINVAL;
if (vol->upd_marker)
return -EBADF;
err = ubi_eba_unmap_leb(ubi, vol, lnum);
if (err)
return err;
return ubi_wl_flush(ubi, vol->vol_id, lnum);
}
EXPORT_SYMBOL_GPL(ubi_leb_erase);
/**
* ubi_leb_unmap - un-map logical eraseblock.
* @desc: volume descriptor
* @lnum: logical eraseblock number
*
* This function un-maps logical eraseblock @lnum and schedules the
* corresponding physical eraseblock for erasure, so that it will eventually be
* physically erased in background. This operation is much faster than the
* erase operation.
*
* Unlike erase, the un-map operation does not guarantee that the logical
* eraseblock will contain all 0xFF bytes when UBI is initialized again. For
* example, if several logical eraseblocks are un-mapped, and an unclean reboot
* happens after this, the logical eraseblocks will not necessarily be
* un-mapped again when this MTD device is attached. They may actually be
* mapped to the same physical eraseblocks again. So, this function has to be
* used with care.
*
* In other words, when un-mapping a logical eraseblock, UBI does not store
* any information about this on the flash media, it just marks the logical
* eraseblock as "un-mapped" in RAM. If UBI is detached before the physical
* eraseblock is physically erased, it will be mapped again to the same logical
* eraseblock when the MTD device is attached again.
*
* The main and obvious use-case of this function is when the contents of a
* logical eraseblock has to be re-written. Then it is much more efficient to
* first un-map it, then write new data, rather than first erase it, then write
* new data. Note, once new data has been written to the logical eraseblock,
* UBI guarantees that the old contents are gone forever. In other words, if an
* unclean reboot happens after the logical eraseblock has been un-mapped and
* then written to, it will contain the last written data.
*
* This function returns zero in case of success and a negative error code in
* case of failure. If the volume is damaged because of an interrupted update
* this function just returns immediately with %-EBADF code.
*/
int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum);
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
if (!ubi_leb_valid(vol, lnum))
return -EINVAL;
if (vol->upd_marker)
return -EBADF;
return ubi_eba_unmap_leb(ubi, vol, lnum);
}
EXPORT_SYMBOL_GPL(ubi_leb_unmap);
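/*
 * Illustrative sketch (not built): the re-write pattern described above.
 * Un-map the LEB instead of erasing it, then write the new data; once the
 * write has completed, UBI guarantees the old contents are gone.
 */
#if 0
static int example_rewrite(struct ubi_volume_desc *desc, int lnum,
const void *data, int aligned_len)
{
int err;
err = ubi_leb_unmap(desc, lnum);
if (err)
return err;
return ubi_leb_write(desc, lnum, data, 0, aligned_len);
}
#endif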
/**
* ubi_leb_map - map logical eraseblock to a physical eraseblock.
* @desc: volume descriptor
* @lnum: logical eraseblock number
*
* This function maps an un-mapped logical eraseblock @lnum to a physical
* eraseblock. This means, that after a successful invocation of this
* function the logical eraseblock @lnum will be empty (contain only %0xFF
* bytes) and be mapped to a physical eraseblock, even if an unclean reboot
* happens.
*
* This function returns zero in case of success, %-EBADF if the volume is
* damaged because of an interrupted update, %-EBADMSG if the logical
* eraseblock is already mapped, and other negative error codes in case of
* other failures.
*/
int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
dbg_gen("map LEB %d:%d", vol->vol_id, lnum);
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
if (!ubi_leb_valid(vol, lnum))
return -EINVAL;
if (vol->upd_marker)
return -EBADF;
if (ubi_eba_is_mapped(vol, lnum))
return -EBADMSG;
return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(ubi_leb_map);
/**
* ubi_is_mapped - check if logical eraseblock is mapped.
* @desc: volume descriptor
* @lnum: logical eraseblock number
*
* This function checks if logical eraseblock @lnum is mapped to a physical
* eraseblock. If a logical eraseblock is un-mapped, this does not necessarily
* mean it will still be un-mapped after the UBI device is re-attached. The
* logical eraseblock may become mapped to the physical eraseblock it was last
* mapped to.
*
* This function returns %1 if the LEB is mapped, %0 if not, and a negative
* error code in case of failure. If the volume is damaged because of an
* interrupted update this function just returns immediately with %-EBADF error
* code.
*/
int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum)
{
struct ubi_volume *vol = desc->vol;
dbg_gen("test LEB %d:%d", vol->vol_id, lnum);
if (!ubi_leb_valid(vol, lnum))
return -EINVAL;
if (vol->upd_marker)
return -EBADF;
return ubi_eba_is_mapped(vol, lnum);
}
EXPORT_SYMBOL_GPL(ubi_is_mapped);
/**
* ubi_sync - synchronize UBI device buffers.
* @ubi_num: UBI device to synchronize
*
* The underlying MTD device may cache data in hardware or in software. This
* function ensures the caches are flushed. Returns zero in case of success and
* a negative error code in case of failure.
*/
int ubi_sync(int ubi_num)
{
struct ubi_device *ubi;
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
mtd_sync(ubi->mtd);
ubi_put_device(ubi);
return 0;
}
EXPORT_SYMBOL_GPL(ubi_sync);
/**
* ubi_flush - flush UBI work queue.
* @ubi_num: UBI device to flush work queue
* @vol_id: volume id to flush for
* @lnum: logical eraseblock number to flush for
*
* This function executes all pending work for a particular volume id /
* logical eraseblock number pair. If either value is set to %UBI_ALL, then it
* acts as a wildcard for all of the corresponding volume numbers or logical
* eraseblock numbers. It returns zero in case of success and a negative error
* code in case of failure.
*/
int ubi_flush(int ubi_num, int vol_id, int lnum)
{
struct ubi_device *ubi;
int err = 0;
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
err = ubi_wl_flush(ubi, vol_id, lnum);
ubi_put_device(ubi);
return err;
}
EXPORT_SYMBOL_GPL(ubi_flush);
BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
/**
* ubi_register_volume_notifier - register a volume notifier.
* @nb: the notifier description object
* @ignore_existing: if non-zero, do not send "added" notifications for
* volumes that already exist
*
* This function registers a volume notifier, which means that
* 'nb->notifier_call()' will be invoked when an UBI volume is created,
* removed, re-sized, re-named, or updated. The first argument of the function
* is the notification type. The second argument is pointer to a
* &struct ubi_notification object which describes the notification event.
* Using UBI API from the volume notifier is prohibited.
*
* This function returns zero in case of success and a negative error code
* in case of failure.
*/
int ubi_register_volume_notifier(struct notifier_block *nb,
int ignore_existing)
{
int err;
err = blocking_notifier_chain_register(&ubi_notifiers, nb);
if (err != 0)
return err;
if (ignore_existing)
return 0;
/*
* We are going to walk all UBI devices and all volumes, and
* notify the user about existing volumes by the %UBI_VOLUME_ADDED
* event. We have to lock the @ubi_devices_mutex to make sure UBI
* devices do not disappear.
*/
mutex_lock(&ubi_devices_mutex);
ubi_enumerate_volumes(nb);
mutex_unlock(&ubi_devices_mutex);
return err;
}
EXPORT_SYMBOL_GPL(ubi_register_volume_notifier);
/**
* ubi_unregister_volume_notifier - unregister the volume notifier.
* @nb: the notifier description object
*
* This function unregisters volume notifier @nb and returns zero in case of
* success and a negative error code in case of failure.
*/
int ubi_unregister_volume_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&ubi_notifiers, nb);
}
EXPORT_SYMBOL_GPL(ubi_unregister_volume_notifier);
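/*
 * Illustrative sketch (not built): a minimal volume notifier. The callback
 * receives the notification type and a pointer to a struct ubi_notification
 * describing the affected device and volume. As noted above, calling back
 * into the UBI API from the notifier is prohibited.
 */
#if 0
static int example_notify(struct notifier_block *nb, unsigned long ntype,
void *data)
{
struct ubi_notification *nt = data;
pr_info("event %lu on ubi%d, volume %d\n",
ntype, nt->di.ubi_num, nt->vi.vol_id);
return NOTIFY_OK;
}
static struct notifier_block example_nb = {
.notifier_call = example_notify,
};
/* registered with: ubi_register_volume_notifier(&example_nb, 0); */
#endif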
| linux-master | drivers/mtd/ubi/kapi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) International Business Machines Corp., 2006
* Copyright (c) Nokia Corporation, 2007
*
* Author: Artem Bityutskiy (Битюцкий Артём),
* Frank Haverkamp
*/
/*
* This file includes UBI initialization and building of UBI devices.
*
* When UBI is initialized, it attaches all the MTD devices specified as the
* module load parameters or the kernel boot parameters. If MTD devices were
* not specified, UBI does not attach any MTD device, but it is possible to do
* this later using the "UBI control device".
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
#include <linux/mtd/partitions.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/major.h>
#include "ubi.h"
/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64
/* Maximum number of comma-separated items in the 'mtd=' parameter */
#define MTD_PARAM_MAX_COUNT 5
/* Maximum value for the number of bad PEBs per 1024 PEBs */
#define MAX_MTD_UBI_BEB_LIMIT 768
#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
#define ubi_is_module() 0
#endif
/**
* struct mtd_dev_param - MTD device parameter description data structure.
* @name: MTD character device node path, MTD device name, or MTD device number
* string
* @ubi_num: UBI number
* @vid_hdr_offs: VID header offset
* @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
* @enable_fm: enable fastmap when value is non-zero
*/
struct mtd_dev_param {
char name[MTD_PARAM_LEN_MAX];
int ubi_num;
int vid_hdr_offs;
int max_beb_per1024;
int enable_fm;
};
/* Number of elements set in the @mtd_dev_param array */
static int mtd_devs;
/* MTD devices specification parameters */
static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
#ifdef CONFIG_MTD_UBI_FASTMAP
/* UBI module parameter to enable fastmap automatically on non-fastmap images */
static bool fm_autoconvert;
static bool fm_debug;
#endif
/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;
/* UBI control character device */
static struct miscdevice ubi_ctrl_cdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "ubi_ctrl",
.fops = &ubi_ctrl_cdev_operations,
};
/* All UBI devices in system */
static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);
/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);
/* "Show" method for files in '/<sysfs>/class/ubi/' */
/* UBI version attribute ('/<sysfs>/class/ubi/version') */
static ssize_t version_show(const struct class *class, const struct class_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", UBI_VERSION);
}
static CLASS_ATTR_RO(version);
static struct attribute *ubi_class_attrs[] = {
&class_attr_version.attr,
NULL,
};
ATTRIBUTE_GROUPS(ubi_class);
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class ubi_class = {
.name = UBI_NAME_STR,
.class_groups = ubi_class_groups,
};
static ssize_t dev_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf);
/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_avail_eraseblocks =
__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_total_eraseblocks =
__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_volumes_count =
__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_reserved_for_bad =
__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_vol_count =
__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_io_size =
__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_ro_mode =
__ATTR(ro_mode, S_IRUGO, dev_attribute_show, NULL);
/**
* ubi_volume_notify - send a volume change notification.
* @ubi: UBI device description object
* @vol: volume description object of the changed volume
* @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
*
* This is a helper function which notifies all subscribers about a volume
* change event (creation, removal, re-sizing, re-naming, updating). Returns
* zero in case of success and a negative error code in case of failure.
*/
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
int ret;
struct ubi_notification nt;
ubi_do_get_device_info(ubi, &nt.di);
ubi_do_get_volume_info(ubi, vol, &nt.vi);
switch (ntype) {
case UBI_VOLUME_ADDED:
case UBI_VOLUME_REMOVED:
case UBI_VOLUME_RESIZED:
case UBI_VOLUME_RENAMED:
ret = ubi_update_fastmap(ubi);
if (ret)
ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
}
return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}
/**
* ubi_notify_all - send a notification to all volumes.
* @ubi: UBI device description object
* @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
* @nb: the notifier to call
*
* This function walks all volumes of UBI device @ubi and sends the @ntype
* notification for each volume. If @nb is %NULL, then all registered notifiers
* are called, otherwise only the @nb notifier is called. Returns the number of
* sent notifications.
*/
int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
{
struct ubi_notification nt;
int i, count = 0;
ubi_do_get_device_info(ubi, &nt.di);
mutex_lock(&ubi->device_mutex);
for (i = 0; i < ubi->vtbl_slots; i++) {
/*
* Since the @ubi->device is locked, and we are not going to
* change @ubi->volumes, we do not have to lock
* @ubi->volumes_lock.
*/
if (!ubi->volumes[i])
continue;
ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
if (nb)
nb->notifier_call(nb, ntype, &nt);
else
blocking_notifier_call_chain(&ubi_notifiers, ntype,
&nt);
count += 1;
}
mutex_unlock(&ubi->device_mutex);
return count;
}
/**
* ubi_enumerate_volumes - send "add" notification for all existing volumes.
* @nb: the notifier to call
*
* This function walks all UBI devices and volumes and sends the
* %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
* registered notifiers are called, otherwise only the @nb notifier is called.
* Returns the number of sent notifications.
*/
int ubi_enumerate_volumes(struct notifier_block *nb)
{
int i, count = 0;
/*
* Since the @ubi_devices_mutex is locked, and we are not going to
* change @ubi_devices, we do not have to lock @ubi_devices_lock.
*/
for (i = 0; i < UBI_MAX_DEVICES; i++) {
struct ubi_device *ubi = ubi_devices[i];
if (!ubi)
continue;
count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
}
return count;
}
/**
* ubi_get_device - get UBI device.
* @ubi_num: UBI device number
*
* This function returns UBI device description object for UBI device number
* @ubi_num, or %NULL if the device does not exist. This function increases the
* device reference count to prevent removal of the device. In other words, the
* device cannot be removed if its reference count is not zero.
*/
struct ubi_device *ubi_get_device(int ubi_num)
{
struct ubi_device *ubi;
spin_lock(&ubi_devices_lock);
ubi = ubi_devices[ubi_num];
if (ubi) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
get_device(&ubi->dev);
}
spin_unlock(&ubi_devices_lock);
return ubi;
}
/**
* ubi_put_device - drop an UBI device reference.
* @ubi: UBI device description object
*/
void ubi_put_device(struct ubi_device *ubi)
{
spin_lock(&ubi_devices_lock);
ubi->ref_count -= 1;
put_device(&ubi->dev);
spin_unlock(&ubi_devices_lock);
}
/**
* ubi_get_by_major - get UBI device by character device major number.
* @major: major number
*
* This function is similar to 'ubi_get_device()', but it searches the device
* by its major number.
*/
struct ubi_device *ubi_get_by_major(int major)
{
int i;
struct ubi_device *ubi;
spin_lock(&ubi_devices_lock);
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
if (ubi && MAJOR(ubi->cdev.dev) == major) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
get_device(&ubi->dev);
spin_unlock(&ubi_devices_lock);
return ubi;
}
}
spin_unlock(&ubi_devices_lock);
return NULL;
}
/**
* ubi_major2num - get UBI device number by character device major number.
* @major: major number
*
* This function searches for the UBI device number by its character device
* major number. If the UBI device was not found, this function returns
* -ENODEV; otherwise the UBI device number is returned.
*/
int ubi_major2num(int major)
{
int i, ubi_num = -ENODEV;
spin_lock(&ubi_devices_lock);
for (i = 0; i < UBI_MAX_DEVICES; i++) {
struct ubi_device *ubi = ubi_devices[i];
if (ubi && MAJOR(ubi->cdev.dev) == major) {
ubi_num = ubi->ubi_num;
break;
}
}
spin_unlock(&ubi_devices_lock);
return ubi_num;
}
/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t ret;
struct ubi_device *ubi;
/*
* The below code looks weird, but it actually makes sense. We get the
* UBI device reference from the contained 'struct ubi_device'. But it
* is unclear whether the device has been removed yet. Indeed, if the
* device had been removed before we increased its reference count,
* 'ubi_get_device()' would return -ENODEV and we would fail.
*
* Remember, 'struct ubi_device' is freed in the release function, so
* we still can use 'ubi->ubi_num'.
*/
ubi = container_of(dev, struct ubi_device, dev);
if (attr == &dev_eraseblock_size)
ret = sprintf(buf, "%d\n", ubi->leb_size);
else if (attr == &dev_avail_eraseblocks)
ret = sprintf(buf, "%d\n", ubi->avail_pebs);
else if (attr == &dev_total_eraseblocks)
ret = sprintf(buf, "%d\n", ubi->good_peb_count);
else if (attr == &dev_volumes_count)
ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
else if (attr == &dev_max_ec)
ret = sprintf(buf, "%d\n", ubi->max_ec);
else if (attr == &dev_reserved_for_bad)
ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
else if (attr == &dev_bad_peb_count)
ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
else if (attr == &dev_max_vol_count)
ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
else if (attr == &dev_min_io_size)
ret = sprintf(buf, "%d\n", ubi->min_io_size);
else if (attr == &dev_bgt_enabled)
ret = sprintf(buf, "%d\n", ubi->thread_enabled);
else if (attr == &dev_mtd_num)
ret = sprintf(buf, "%d\n", ubi->mtd->index);
else if (attr == &dev_ro_mode)
ret = sprintf(buf, "%d\n", ubi->ro_mode);
else
ret = -EINVAL;
return ret;
}
static struct attribute *ubi_dev_attrs[] = {
&dev_eraseblock_size.attr,
&dev_avail_eraseblocks.attr,
&dev_total_eraseblocks.attr,
&dev_volumes_count.attr,
&dev_max_ec.attr,
&dev_reserved_for_bad.attr,
&dev_bad_peb_count.attr,
&dev_max_vol_count.attr,
&dev_min_io_size.attr,
&dev_bgt_enabled.attr,
&dev_mtd_num.attr,
&dev_ro_mode.attr,
NULL
};
ATTRIBUTE_GROUPS(ubi_dev);
static void dev_release(struct device *dev)
{
struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
kfree(ubi);
}
/**
* kill_volumes - destroy all user volumes.
* @ubi: UBI device description object
*/
static void kill_volumes(struct ubi_device *ubi)
{
int i;
for (i = 0; i < ubi->vtbl_slots; i++)
if (ubi->volumes[i])
ubi_free_volume(ubi, ubi->volumes[i]);
}
/**
* uif_init - initialize user interfaces for an UBI device.
* @ubi: UBI device description object
*
* This function initializes various user interfaces for an UBI device. If the
* initialization fails at an early stage, this function frees all the
* resources it allocated and returns an error.
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
static int uif_init(struct ubi_device *ubi)
{
int i, err;
dev_t dev;
sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
/*
* Major numbers for the UBI character devices are allocated
* dynamically. Major numbers of volume character devices are
* equivalent to ones of the corresponding UBI character device. Minor
* numbers of UBI character devices are 0, while minor numbers of
* volume character devices start from 1. Thus, we allocate one major
* number and ubi->vtbl_slots + 1 minor numbers.
*/
err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
if (err) {
ubi_err(ubi, "cannot register UBI character devices");
return err;
}
ubi->dev.devt = dev;
ubi_assert(MINOR(dev) == 0);
cdev_init(&ubi->cdev, &ubi_cdev_operations);
dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
ubi->cdev.owner = THIS_MODULE;
dev_set_name(&ubi->dev, UBI_NAME_STR "%d", ubi->ubi_num);
err = cdev_device_add(&ubi->cdev, &ubi->dev);
if (err)
goto out_unreg;
for (i = 0; i < ubi->vtbl_slots; i++)
if (ubi->volumes[i]) {
err = ubi_add_volume(ubi, ubi->volumes[i]);
if (err) {
ubi_err(ubi, "cannot add volume %d", i);
ubi->volumes[i] = NULL;
goto out_volumes;
}
}
return 0;
out_volumes:
kill_volumes(ubi);
cdev_device_del(&ubi->cdev, &ubi->dev);
out_unreg:
unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
ubi_err(ubi, "cannot initialize UBI %s, error %d",
ubi->ubi_name, err);
return err;
}
/**
* uif_close - close user interfaces for an UBI device.
* @ubi: UBI device description object
*
* Note, since this function un-registers UBI volume device objects (@vol->dev),
* the memory allocated for the volumes is freed as well (in the release
* function).
*/
static void uif_close(struct ubi_device *ubi)
{
kill_volumes(ubi);
cdev_device_del(&ubi->cdev, &ubi->dev);
unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}
/**
* ubi_free_volumes_from - free volumes from specific index.
* @ubi: UBI device description object
* @from: the start index used for volume free.
*/
static void ubi_free_volumes_from(struct ubi_device *ubi, int from)
{
int i;
for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
if (!ubi->volumes[i])
continue;
ubi_eba_replace_table(ubi->volumes[i], NULL);
ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
kfree(ubi->volumes[i]);
ubi->volumes[i] = NULL;
}
}
/**
* ubi_free_all_volumes - free all volumes.
* @ubi: UBI device description object
*/
void ubi_free_all_volumes(struct ubi_device *ubi)
{
ubi_free_volumes_from(ubi, 0);
}
/**
* ubi_free_internal_volumes - free internal volumes.
* @ubi: UBI device description object
*/
void ubi_free_internal_volumes(struct ubi_device *ubi)
{
ubi_free_volumes_from(ubi, ubi->vtbl_slots);
}
static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
{
int limit, device_pebs;
uint64_t device_size;
if (!max_beb_per1024) {
/*
* Since max_beb_per1024 has not been set by the user in either
* the cmdline or Kconfig, use mtd_max_bad_blocks to set the
* limit if it is supported by the device.
*/
limit = mtd_max_bad_blocks(ubi->mtd, 0, ubi->mtd->size);
if (limit < 0)
return 0;
return limit;
}
/*
* Here we are using size of the entire flash chip and
* not just the MTD partition size because the maximum
* number of bad eraseblocks is a percentage of the
* whole device and bad eraseblocks are not fairly
* distributed over the flash chip. So the worst case
* is that all the bad eraseblocks of the chip are in
* the MTD partition we are attaching (ubi->mtd).
*/
device_size = mtd_get_device_size(ubi->mtd);
device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
limit = mult_frac(device_pebs, max_beb_per1024, 1024);
/* Round it up */
if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
limit += 1;
return limit;
}
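/*
 * Worked example (illustrative): for a 256 MiB chip with 128 KiB
 * eraseblocks, device_pebs = 2048. With max_beb_per1024 = 20 the limit is
 * 2048 * 20 / 1024 = 40 PEBs, and those 40 are reserved even if the MTD
 * partition being attached covers only part of the chip.
 */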
/**
* io_init - initialize I/O sub-system for a given UBI device.
* @ubi: UBI device description object
* @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
*
* If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
* assumed:
* o EC header is always at offset zero - this cannot be changed;
* o VID header starts just after the EC header at the closest address
* aligned to @io->hdrs_min_io_size;
* o data starts just after the VID header at the closest address aligned to
* @io->min_io_size
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
static int io_init(struct ubi_device *ubi, int max_beb_per1024)
{
dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
if (ubi->mtd->numeraseregions != 0) {
/*
* Some flashes have several erase regions. Different regions
* may have different eraseblock size and other
* characteristics. It looks like mostly multi-region flashes
* have one "main" region and one or more small regions to
* store boot loader code or boot parameters or whatever. I
* guess we should just pick the largest region. But this is
* not implemented.
*/
ubi_err(ubi, "multiple regions, not implemented");
return -EINVAL;
}
if (ubi->vid_hdr_offset < 0)
return -EINVAL;
/*
* Note, in this implementation we support MTD devices with 0x7FFFFFFF
* physical eraseblocks maximum.
*/
ubi->peb_size = ubi->mtd->erasesize;
ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
ubi->flash_size = ubi->mtd->size;
if (mtd_can_have_bb(ubi->mtd)) {
ubi->bad_allowed = 1;
ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
}
if (ubi->mtd->type == MTD_NORFLASH)
ubi->nor_flash = 1;
ubi->min_io_size = ubi->mtd->writesize;
ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
/*
* Make sure minimal I/O unit is power of 2. Note, there is no
* fundamental reason for this assumption. It is just an optimization
* which allows us to avoid costly division operations.
*/
if (!is_power_of_2(ubi->min_io_size)) {
ubi_err(ubi, "min. I/O unit (%d) is not power of 2",
ubi->min_io_size);
return -EINVAL;
}
ubi_assert(ubi->hdrs_min_io_size > 0);
ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
ubi->max_write_size = ubi->mtd->writebufsize;
/*
* The maximum write size has to be greater than or equal to the min. I/O
* size, and be a multiple of the min. I/O size.
*/
if (ubi->max_write_size < ubi->min_io_size ||
ubi->max_write_size % ubi->min_io_size ||
!is_power_of_2(ubi->max_write_size)) {
ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit",
ubi->max_write_size, ubi->min_io_size);
return -EINVAL;
}
/* Calculate default aligned sizes of EC and VID headers */
ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
dbg_gen("min_io_size %d", ubi->min_io_size);
dbg_gen("max_write_size %d", ubi->max_write_size);
dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
if (ubi->vid_hdr_offset == 0)
/* Default offset */
ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
ubi->ec_hdr_alsize;
else {
ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
~(ubi->hdrs_min_io_size - 1);
ubi->vid_hdr_shift = ubi->vid_hdr_offset -
ubi->vid_hdr_aloffset;
}
/*
* Memory allocation for VID header is ubi->vid_hdr_alsize
* which is described in comments in io.c.
* Make sure VID header shift + UBI_VID_HDR_SIZE not exceeds
* ubi->vid_hdr_alsize, so that all vid header operations
* won't access memory out of bounds.
*/
if ((ubi->vid_hdr_shift + UBI_VID_HDR_SIZE) > ubi->vid_hdr_alsize) {
ubi_err(ubi, "Invalid VID header offset %d, VID header shift(%d)"
" + VID header size(%zu) > VID header aligned size(%d).",
ubi->vid_hdr_offset, ubi->vid_hdr_shift,
UBI_VID_HDR_SIZE, ubi->vid_hdr_alsize);
return -EINVAL;
}
/* Similar for the data offset */
ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset);
dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift);
dbg_gen("leb_start %d", ubi->leb_start);
/* The shift must be aligned to 32-bit boundary */
if (ubi->vid_hdr_shift % 4) {
ubi_err(ubi, "unaligned VID header shift %d",
ubi->vid_hdr_shift);
return -EINVAL;
}
/* Check sanity */
if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
ubi->leb_start & (ubi->min_io_size - 1)) {
ubi_err(ubi, "bad VID header (%d) or data offsets (%d)",
ubi->vid_hdr_offset, ubi->leb_start);
return -EINVAL;
}
/*
* Set the maximum amount of physical erroneous eraseblocks to be 10%.
* Erroneous PEBs are those which have read errors.
*/
ubi->max_erroneous = ubi->peb_count / 10;
if (ubi->max_erroneous < 16)
ubi->max_erroneous = 16;
dbg_gen("max_erroneous %d", ubi->max_erroneous);
/*
* It may happen that EC and VID headers are situated in one minimal
* I/O unit. In this case we can only accept this UBI image in
* read-only mode.
*/
if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
ubi->ro_mode = 1;
}
ubi->leb_size = ubi->peb_size - ubi->leb_start;
if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode",
ubi->mtd->index);
ubi->ro_mode = 1;
}
/*
* Note, ideally, we have to initialize @ubi->bad_peb_count here. But
* unfortunately, MTD does not provide this information. We should loop
* over all physical eraseblocks and invoke mtd->block_is_bad() for
* each physical eraseblock. So, we leave @ubi->bad_peb_count
* uninitialized so far.
*/
return 0;
}
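/*
 * Worked example (illustrative): a NAND chip with 2048-byte pages and
 * 512-byte sub-pages gives min_io_size = 2048 and hdrs_min_io_size = 512.
 * The EC header stays at offset 0 with ec_hdr_alsize = ALIGN(64, 512) = 512,
 * so the default vid_hdr_offset is 512 (shift 0), and leb_start =
 * ALIGN(512 + 64, 2048) = 2048, leaving leb_size = peb_size - 2048.
 */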
/**
* autoresize - re-size the volume which has the "auto-resize" flag set.
* @ubi: UBI device description object
* @vol_id: ID of the volume to re-size
*
* This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
* the volume table to the largest possible size. See comments in ubi-header.h
* for more description of the flag. Returns zero in case of success and a
* negative error code in case of failure.
*/
static int autoresize(struct ubi_device *ubi, int vol_id)
{
struct ubi_volume_desc desc;
struct ubi_volume *vol = ubi->volumes[vol_id];
int err, old_reserved_pebs = vol->reserved_pebs;
if (ubi->ro_mode) {
ubi_warn(ubi, "skip auto-resize because of R/O mode");
return 0;
}
/*
* Clear the auto-resize flag in the volume in-memory copy of the
* volume table, and 'ubi_resize_volume()' will propagate this change
* to the flash.
*/
ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
if (ubi->avail_pebs == 0) {
struct ubi_vtbl_record vtbl_rec;
/*
* No available PEBs to re-size the volume, clear the flag on
* flash and exit.
*/
vtbl_rec = ubi->vtbl[vol_id];
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
ubi_err(ubi, "cannot clean auto-resize flag for volume %d",
vol_id);
} else {
desc.vol = vol;
err = ubi_resize_volume(&desc,
old_reserved_pebs + ubi->avail_pebs);
if (err)
ubi_err(ubi, "cannot auto-resize volume %d",
vol_id);
}
if (err)
return err;
ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs",
vol_id, vol->name, old_reserved_pebs, vol->reserved_pebs);
return 0;
}
/**
* ubi_attach_mtd_dev - attach an MTD device.
* @mtd: MTD device description object
* @ubi_num: number to assign to the new UBI device
* @vid_hdr_offset: VID header offset
* @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
* @disable_fm: whether disable fastmap
*
* This function attaches MTD device @mtd to UBI and assigns @ubi_num number
* to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
* which case this function finds a vacant device number and assigns it
* automatically. Returns the new UBI device number in case of success and a
* negative error code in case of failure.
*
* If @disable_fm is true, UBI does not create a new fastmap even if the module
* parameter 'fm_autoconvert' is set, and an existing old fastmap will be
* destroyed after doing a full scan.
*
* Note, the invocations of this function have to be serialized by the
* @ubi_devices_mutex.
*/
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
int vid_hdr_offset, int max_beb_per1024, bool disable_fm)
{
struct ubi_device *ubi;
int i, err;
if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
return -EINVAL;
if (!max_beb_per1024)
max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;
/*
* Check if we already have the same MTD device attached.
*
* Note, this function assumes that UBI devices creations and deletions
* are serialized, so it does not take the &ubi_devices_lock.
*/
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
if (ubi && mtd->index == ubi->mtd->index) {
pr_err("ubi: mtd%d is already attached to ubi%d\n",
mtd->index, i);
return -EEXIST;
}
}
/*
* Make sure this MTD device is not emulated on top of an UBI volume
* already. Well, generally this recursion works fine, but there are
* different problems like the UBI module takes a reference to itself
* by attaching (and thus, opening) the emulated MTD device. This
* results in inability to unload the module. And in general it makes
* no sense to attach emulated MTD devices, so we prohibit this.
*/
if (mtd->type == MTD_UBIVOLUME) {
pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI\n",
mtd->index);
return -EINVAL;
}
/*
* Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
* MLC NAND is different and needs special care, otherwise UBI or UBIFS
* will die soon and you will lose all your data.
* Relax this rule if the partition we're attaching to operates in SLC
* mode.
*/
if (mtd->type == MTD_MLCNANDFLASH &&
!(mtd->flags & MTD_SLC_ON_MLC_EMULATION)) {
pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
mtd->index);
return -EINVAL;
}
if (ubi_num == UBI_DEV_NUM_AUTO) {
/* Search for an empty slot in the @ubi_devices array */
for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
if (!ubi_devices[ubi_num])
break;
if (ubi_num == UBI_MAX_DEVICES) {
pr_err("ubi: only %d UBI devices may be created\n",
UBI_MAX_DEVICES);
return -ENFILE;
}
} else {
if (ubi_num >= UBI_MAX_DEVICES)
return -EINVAL;
/* Make sure ubi_num is not busy */
if (ubi_devices[ubi_num]) {
pr_err("ubi: ubi%i already exists\n", ubi_num);
return -EEXIST;
}
}
ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
if (!ubi)
return -ENOMEM;
device_initialize(&ubi->dev);
ubi->dev.release = dev_release;
ubi->dev.class = &ubi_class;
ubi->dev.groups = ubi_dev_groups;
ubi->dev.parent = &mtd->dev;
ubi->mtd = mtd;
ubi->ubi_num = ubi_num;
ubi->vid_hdr_offset = vid_hdr_offset;
ubi->autoresize_vol_id = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
ubi->fm_pool.used = ubi->fm_pool.size = 0;
ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
/*
* fm_pool.max_size is 5% of the total number of PEBs but it's also
* between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
*/
ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
ubi->fm_pool.max_size = max(ubi->fm_pool.max_size,
UBI_FM_MIN_POOL_SIZE);
ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
ubi->fm_disabled = (!fm_autoconvert || disable_fm) ? 1 : 0;
if (fm_debug)
ubi_enable_dbg_chk_fastmap(ubi);
if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
<= UBI_FM_MAX_START) {
ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.",
UBI_FM_MAX_START);
ubi->fm_disabled = 1;
}
ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size);
ubi_msg(ubi, "default fastmap WL pool size: %d",
ubi->fm_wl_pool.max_size);
#else
ubi->fm_disabled = 1;
#endif
mutex_init(&ubi->buf_mutex);
mutex_init(&ubi->ckvol_mutex);
mutex_init(&ubi->device_mutex);
spin_lock_init(&ubi->volumes_lock);
init_rwsem(&ubi->fm_protect);
init_rwsem(&ubi->fm_eba_sem);
ubi_msg(ubi, "attaching mtd%d", mtd->index);
err = io_init(ubi, max_beb_per1024);
if (err)
goto out_free;
err = -ENOMEM;
ubi->peb_buf = vmalloc(ubi->peb_size);
if (!ubi->peb_buf)
goto out_free;
#ifdef CONFIG_MTD_UBI_FASTMAP
ubi->fm_size = ubi_calc_fm_size(ubi);
ubi->fm_buf = vzalloc(ubi->fm_size);
if (!ubi->fm_buf)
goto out_free;
#endif
err = ubi_attach(ubi, disable_fm ? 1 : 0);
if (err) {
ubi_err(ubi, "failed to attach mtd%d, error %d",
mtd->index, err);
goto out_free;
}
if (ubi->autoresize_vol_id != -1) {
err = autoresize(ubi, ubi->autoresize_vol_id);
if (err)
goto out_detach;
}
err = uif_init(ubi);
if (err)
goto out_detach;
err = ubi_debugfs_init_dev(ubi);
if (err)
goto out_uif;
ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
if (IS_ERR(ubi->bgt_thread)) {
err = PTR_ERR(ubi->bgt_thread);
ubi_err(ubi, "cannot spawn \"%s\", error %d",
ubi->bgt_name, err);
goto out_debugfs;
}
ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)",
mtd->index, mtd->name, ubi->flash_size >> 20);
ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes",
ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d",
ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d",
ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. volumes count: %d",
ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
ubi->vtbl_slots);
ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
ubi->image_seq);
ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
/*
* The below lock makes sure we do not race with 'ubi_thread()' which
* checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
*/
spin_lock(&ubi->wl_lock);
ubi->thread_enabled = 1;
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
ubi_devices[ubi_num] = ubi;
ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;
out_debugfs:
ubi_debugfs_exit_dev(ubi);
out_uif:
uif_close(ubi);
out_detach:
ubi_wl_close(ubi);
ubi_free_all_volumes(ubi);
vfree(ubi->vtbl);
out_free:
vfree(ubi->peb_buf);
vfree(ubi->fm_buf);
put_device(&ubi->dev);
return err;
}
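/*
 * Illustrative sketch (not built): attaching an already-opened MTD device
 * with an automatically assigned UBI number, default VID header offset and
 * bad-PEB limit, and fastmap allowed. @mtd and ubi_num are assumed to be
 * provided by the caller; per the rule above, the call is serialized by
 * @ubi_devices_mutex.
 */
#if 0
mutex_lock(&ubi_devices_mutex);
ubi_num = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0, false);
mutex_unlock(&ubi_devices_mutex);
#endif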
/**
* ubi_detach_mtd_dev - detach an MTD device.
* @ubi_num: UBI device number to detach from
* @anyway: detach MTD even if device reference count is not zero
*
* This function destroys an UBI device number @ubi_num and detaches the
* underlying MTD device. Returns zero in case of success and %-EBUSY if the
* UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
* exist.
*
* Note, the invocations of this function have to be serialized by the
* @ubi_devices_mutex.
*/
int ubi_detach_mtd_dev(int ubi_num, int anyway)
{
struct ubi_device *ubi;
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
return -EINVAL;
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -EINVAL;
spin_lock(&ubi_devices_lock);
put_device(&ubi->dev);
ubi->ref_count -= 1;
if (ubi->ref_count) {
if (!anyway) {
spin_unlock(&ubi_devices_lock);
return -EBUSY;
}
/* This may only happen if there is a bug */
ubi_err(ubi, "%s reference count %d, destroy anyway",
ubi->ubi_name, ubi->ref_count);
}
ubi_devices[ubi_num] = NULL;
spin_unlock(&ubi_devices_lock);
ubi_assert(ubi_num == ubi->ubi_num);
ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
#ifdef CONFIG_MTD_UBI_FASTMAP
/*
* If we don't write a new fastmap at detach time we lose all
* EC updates that have been made since the last written fastmap.
* In case of fastmap debugging we omit the update to simulate an
* unclean shutdown.
*/
if (!ubi_dbg_chk_fastmap(ubi))
ubi_update_fastmap(ubi);
#endif
/*
* Before freeing anything, we have to stop the background thread to
* prevent it from doing anything on this device while we are freeing.
*/
if (ubi->bgt_thread)
kthread_stop(ubi->bgt_thread);
#ifdef CONFIG_MTD_UBI_FASTMAP
cancel_work_sync(&ubi->fm_work);
#endif
ubi_debugfs_exit_dev(ubi);
uif_close(ubi);
ubi_wl_close(ubi);
ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
vfree(ubi->peb_buf);
vfree(ubi->fm_buf);
ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
put_mtd_device(ubi->mtd);
put_device(&ubi->dev);
return 0;
}
/**
* open_mtd_by_chdev - open an MTD device by its character device node path.
* @mtd_dev: MTD character device node path
*
* This helper function opens an MTD device by its character node device path.
* Returns MTD device description object in case of success and a negative
* error code in case of failure.
*/
static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
{
int err, minor;
struct path path;
struct kstat stat;
/* Probably this is an MTD character device node path */
err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
if (err)
return ERR_PTR(err);
err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
path_put(&path);
if (err)
return ERR_PTR(err);
/* MTD device number is defined by the major / minor numbers */
if (MAJOR(stat.rdev) != MTD_CHAR_MAJOR || !S_ISCHR(stat.mode))
return ERR_PTR(-EINVAL);
minor = MINOR(stat.rdev);
if (minor & 1)
/*
* We do not think support for the read-only "/dev/mtdrX" devices is
* needed, so we do not support them to avoid doing extra work.
*/
return ERR_PTR(-EINVAL);
return get_mtd_device(NULL, minor / 2);
}
/**
* open_mtd_device - open MTD device by name, character device path, or number.
* @mtd_dev: name, character device node path, or MTD device device number
*
* This function tries to open an MTD device described by the @mtd_dev string,
* which is first treated as an ASCII MTD device number, and if that fails, it
* is treated as an MTD device name, and if that also fails, it is treated as
* an MTD character device node path. Returns an MTD device description object
* in case of success and a negative error code in case of failure.
*/
static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
{
struct mtd_info *mtd;
int mtd_num;
char *endp;
mtd_num = simple_strtoul(mtd_dev, &endp, 0);
if (*endp != '\0' || mtd_dev == endp) {
/*
* This does not look like an ASCII integer, probably this is
* MTD device name.
*/
mtd = get_mtd_device_nm(mtd_dev);
if (PTR_ERR(mtd) == -ENODEV)
/* Probably this is an MTD character device node path */
mtd = open_mtd_by_chdev(mtd_dev);
} else
mtd = get_mtd_device(NULL, mtd_num);
return mtd;
}
static int __init ubi_init(void)
{
int err, i, k;
/* Ensure that EC and VID headers have correct size */
BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
if (mtd_devs > UBI_MAX_DEVICES) {
pr_err("UBI error: too many MTD devices, maximum is %d\n",
UBI_MAX_DEVICES);
return -EINVAL;
}
/* Create base sysfs directory and sysfs files */
err = class_register(&ubi_class);
if (err < 0)
return err;
err = misc_register(&ubi_ctrl_cdev);
if (err) {
pr_err("UBI error: cannot register device\n");
goto out;
}
ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
sizeof(struct ubi_wl_entry),
0, 0, NULL);
if (!ubi_wl_entry_slab) {
err = -ENOMEM;
goto out_dev_unreg;
}
err = ubi_debugfs_init();
if (err)
goto out_slab;
/* Attach MTD devices */
for (i = 0; i < mtd_devs; i++) {
struct mtd_dev_param *p = &mtd_dev_param[i];
struct mtd_info *mtd;
cond_resched();
mtd = open_mtd_device(p->name);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
pr_err("UBI error: cannot open mtd %s, error %d\n",
p->name, err);
/* See comment below re-ubi_is_module(). */
if (ubi_is_module())
goto out_detach;
continue;
}
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, p->ubi_num,
p->vid_hdr_offs, p->max_beb_per1024,
p->enable_fm == 0);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
pr_err("UBI error: cannot attach mtd%d\n",
mtd->index);
put_mtd_device(mtd);
/*
* Originally UBI stopped initializing on any error.
* However, later on it was found out that this
* behavior is not very good when UBI is compiled into
* the kernel and the MTD devices to attach are passed
* through the command line. Indeed, UBI failure
* stopped whole boot sequence.
*
* To fix this, we changed the behavior for the
* non-module case, but preserved the old behavior for
* the module case, just for compatibility. This is a
* little inconsistent, though.
*/
if (ubi_is_module())
goto out_detach;
}
}
err = ubiblock_init();
if (err) {
pr_err("UBI error: block: cannot initialize, error %d\n", err);
/* See comment above re-ubi_is_module(). */
if (ubi_is_module())
goto out_detach;
}
return 0;
out_detach:
for (k = 0; k < i; k++)
if (ubi_devices[k]) {
mutex_lock(&ubi_devices_mutex);
ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
mutex_unlock(&ubi_devices_mutex);
}
ubi_debugfs_exit();
out_slab:
kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
misc_deregister(&ubi_ctrl_cdev);
out:
class_unregister(&ubi_class);
pr_err("UBI error: cannot initialize UBI, error %d\n", err);
return err;
}
late_initcall(ubi_init);
static void __exit ubi_exit(void)
{
int i;
ubiblock_exit();
for (i = 0; i < UBI_MAX_DEVICES; i++)
if (ubi_devices[i]) {
mutex_lock(&ubi_devices_mutex);
ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
mutex_unlock(&ubi_devices_mutex);
}
ubi_debugfs_exit();
kmem_cache_destroy(ubi_wl_entry_slab);
misc_deregister(&ubi_ctrl_cdev);
class_unregister(&ubi_class);
}
module_exit(ubi_exit);
/**
* bytes_str_to_int - convert a number of bytes string into an integer.
* @str: the string to convert
*
* This function returns the resulting positive integer in case of success and
* a negative error code in case of failure.
*/
static int bytes_str_to_int(const char *str)
{
char *endp;
unsigned long result;
result = simple_strtoul(str, &endp, 0);
if (str == endp || result >= INT_MAX) {
pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
return -EINVAL;
}
switch (*endp) {
case 'G':
result *= 1024;
fallthrough;
case 'M':
result *= 1024;
fallthrough;
case 'K':
result *= 1024;
break;
case '\0':
break;
default:
pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
return -EINVAL;
}
	/* Guard against overflow after the suffix multiplication */
	if (result >= INT_MAX) {
		pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
		return -EINVAL;
	}
	return result;
}
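/*
 * Illustrative (hypothetical) inputs for bytes_str_to_int():
 *
 *	"2048" -> 2048
 *	"4K"   -> 4096        (4 * 1024)
 *	"1M"   -> 1048576     (1 * 1024 * 1024)
 *	"1G"   -> 1073741824  (1 * 1024 * 1024 * 1024)
 *	"12x"  -> -EINVAL     (unknown suffix)
 *
 * Only the first character after the digits is inspected, so "4KiB"
 * parses the same as "4K".
 */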
/**
* ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
* @val: the parameter value to parse
* @kp: not used
*
* This function returns zero in case of success and a negative error code in
* case of error.
*/
static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
{
int i, len;
struct mtd_dev_param *p;
char buf[MTD_PARAM_LEN_MAX];
char *pbuf = &buf[0];
char *tokens[MTD_PARAM_MAX_COUNT], *token;
if (!val)
return -EINVAL;
if (mtd_devs == UBI_MAX_DEVICES) {
pr_err("UBI error: too many parameters, max. is %d\n",
UBI_MAX_DEVICES);
return -EINVAL;
}
len = strnlen(val, MTD_PARAM_LEN_MAX);
if (len == MTD_PARAM_LEN_MAX) {
pr_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
val, MTD_PARAM_LEN_MAX);
return -EINVAL;
}
if (len == 0) {
pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
return 0;
}
strcpy(buf, val);
/* Get rid of the final newline */
if (buf[len - 1] == '\n')
buf[len - 1] = '\0';
for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
tokens[i] = strsep(&pbuf, ",");
if (pbuf) {
pr_err("UBI error: too many arguments at \"%s\"\n", val);
return -EINVAL;
}
p = &mtd_dev_param[mtd_devs];
strcpy(&p->name[0], tokens[0]);
token = tokens[1];
if (token) {
p->vid_hdr_offs = bytes_str_to_int(token);
if (p->vid_hdr_offs < 0)
return p->vid_hdr_offs;
}
token = tokens[2];
if (token) {
int err = kstrtoint(token, 10, &p->max_beb_per1024);
if (err) {
pr_err("UBI error: bad value for max_beb_per1024 parameter: %s\n",
token);
return -EINVAL;
}
}
token = tokens[3];
if (token) {
int err = kstrtoint(token, 10, &p->ubi_num);
if (err) {
pr_err("UBI error: bad value for ubi_num parameter: %s\n",
token);
return -EINVAL;
}
} else
p->ubi_num = UBI_DEV_NUM_AUTO;
token = tokens[4];
if (token) {
int err = kstrtoint(token, 10, &p->enable_fm);
if (err) {
pr_err("UBI error: bad value for enable_fm parameter: %s\n",
token);
return -EINVAL;
}
} else
p->enable_fm = 0;
mtd_devs += 1;
return 0;
}
module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 0400);
MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024[,ubi_num[,enable_fm]]]].\n"
"Multiple \"mtd\" parameters may be specified.\n"
"MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
"Optional \"vid_hdr_offs\" parameter specifies UBI VID header position to be used by UBI. (default value if 0)\n"
"Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
__stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
"Optional \"ubi_num\" parameter specifies UBI device number which have to be assigned to the newly created UBI device (assigned automatically by default)\n"
"Optional \"enable_fm\" parameter determines whether to enable fastmap during attach. If the value is non-zero, fastmap is enabled. Default value is 0.\n"
"\n"
"Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
"Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
"Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
"Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 and using default values for the other fields.\n"
"example 5: mtd=1,0,0,5 mtd=2,0,0,6,1 - attach MTD device /dev/mtd1 to UBI 5 and disable fastmap; attach MTD device /dev/mtd2 to UBI 6 and enable fastmap.(only works when fastmap is enabled and fm_autoconvert=Y).\n"
"\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
#ifdef CONFIG_MTD_UBI_FASTMAP
module_param(fm_autoconvert, bool, 0644);
MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
module_param(fm_debug, bool, 0);
MODULE_PARM_DESC(fm_debug, "Set this parameter to enable fastmap debugging by default. Warning, this will make fastmap slow!");
#endif
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/ubi/build.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) International Business Machines Corp., 2006
* Copyright (c) Nokia Corporation, 2006, 2007
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/*
* This file includes volume table manipulation code. The volume table is an
* on-flash table containing volume meta-data like name, number of reserved
* physical eraseblocks, type, etc. The volume table is stored in the so-called
* "layout volume".
*
* The layout volume is an internal volume which is organized as follows. It
* consists of two logical eraseblocks - LEB 0 and LEB 1. Each logical
* eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each
* other. This redundancy guarantees robustness to unclean reboots. The volume
* table is basically an array of volume table records. Each record contains
 * full information about the volume and is protected by a CRC checksum. Note,
 * nowadays we use the atomic LEB change operation when updating the volume
 * table, so we do not really need 2 LEBs anymore, but we preserve the older
 * design for backward compatibility reasons.
*
* When the volume table is changed, it is first changed in RAM. Then LEB 0 is
 * erased, and the updated volume table is written back to LEB 0. Then the same
 * is done for LEB 1. This scheme guarantees recoverability from unclean reboots.
*
* In this UBI implementation the on-flash volume table does not contain any
* information about how much data static volumes contain.
*
* But it would still be beneficial to store this information in the volume
* table. For example, suppose we have a static volume X, and all its physical
 * eraseblocks became bad for some reason. Suppose we are attaching the
 * corresponding MTD device, and for some reason we find no logical eraseblocks
* corresponding to the volume X. According to the volume table volume X does
* exist. So we don't know whether it is just empty or all its physical
* eraseblocks went bad. So we cannot alarm the user properly.
*
* The volume table also stores so-called "update marker", which is used for
* volume updates. Before updating the volume, the update marker is set, and
* after the update operation is finished, the update marker is cleared. So if
* the update operation was interrupted (e.g. by an unclean reboot) - the
 * update marker is still there and we know that the volume's contents are
 * damaged.
*/
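/*
 * A rough, illustrative picture of the layout volume described above:
 *
 *	LEB 0: | vtbl record 0 | vtbl record 1 | ... | vtbl record N-1 |
 *	LEB 1: an identical copy of LEB 0
 *
 * Each record is UBI_VTBL_RECORD_SIZE bytes, carries its own CRC, and
 * holds the per-volume name, reserved PEB count and update marker.
 */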
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include "ubi.h"
static void self_vtbl_check(const struct ubi_device *ubi);
/* Empty volume table record */
static struct ubi_vtbl_record empty_vtbl_record;
/**
 * ubi_update_layout_vol - helper for updating the layout volume on flash
* @ubi: UBI device description object
*/
static int ubi_update_layout_vol(struct ubi_device *ubi)
{
struct ubi_volume *layout_vol;
int i, err;
layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl,
ubi->vtbl_size);
if (err)
return err;
}
return 0;
}
/**
* ubi_change_vtbl_record - change volume table record.
* @ubi: UBI device description object
* @idx: table index to change
* @vtbl_rec: new volume table record
*
* This function changes volume table record @idx. If @vtbl_rec is %NULL, empty
* volume table record is written. The caller does not have to calculate CRC of
* the record as it is done by this function. Returns zero in case of success
* and a negative error code in case of failure.
*/
int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
struct ubi_vtbl_record *vtbl_rec)
{
int err;
uint32_t crc;
ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
if (!vtbl_rec)
vtbl_rec = &empty_vtbl_record;
else {
crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC);
vtbl_rec->crc = cpu_to_be32(crc);
}
memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
err = ubi_update_layout_vol(ubi);
self_vtbl_check(ubi);
return err ? err : 0;
}
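/*
 * Hypothetical usage sketch (the real callers live elsewhere): clearing
 * the record of a removed volume boils down to
 *
 *	err = ubi_change_vtbl_record(ubi, vol_id, NULL);
 *
 * which writes the pre-CRC'd empty record into slot @vol_id on both
 * copies of the layout volume.
 */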
/**
* ubi_vtbl_rename_volumes - rename UBI volumes in the volume table.
* @ubi: UBI device description object
* @rename_list: list of &struct ubi_rename_entry objects
*
 * This function re-names multiple volumes specified in @rename_list in the volume
* table. Returns zero in case of success and a negative error code in case of
* failure.
*/
int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
struct list_head *rename_list)
{
struct ubi_rename_entry *re;
list_for_each_entry(re, rename_list, list) {
uint32_t crc;
struct ubi_volume *vol = re->desc->vol;
struct ubi_vtbl_record *vtbl_rec = &ubi->vtbl[vol->vol_id];
if (re->remove) {
memcpy(vtbl_rec, &empty_vtbl_record,
sizeof(struct ubi_vtbl_record));
continue;
}
vtbl_rec->name_len = cpu_to_be16(re->new_name_len);
memcpy(vtbl_rec->name, re->new_name, re->new_name_len);
memset(vtbl_rec->name + re->new_name_len, 0,
UBI_VOL_NAME_MAX + 1 - re->new_name_len);
crc = crc32(UBI_CRC32_INIT, vtbl_rec,
UBI_VTBL_RECORD_SIZE_CRC);
vtbl_rec->crc = cpu_to_be32(crc);
}
return ubi_update_layout_vol(ubi);
}
/**
* vtbl_check - check if volume table is not corrupted and sensible.
* @ubi: UBI device description object
* @vtbl: volume table
*
* This function returns zero if @vtbl is all right, %1 if CRC is incorrect,
* and %-EINVAL if it contains inconsistent data.
*/
static int vtbl_check(const struct ubi_device *ubi,
const struct ubi_vtbl_record *vtbl)
{
int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len;
int upd_marker, err;
uint32_t crc;
const char *name;
for (i = 0; i < ubi->vtbl_slots; i++) {
cond_resched();
reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
alignment = be32_to_cpu(vtbl[i].alignment);
data_pad = be32_to_cpu(vtbl[i].data_pad);
upd_marker = vtbl[i].upd_marker;
vol_type = vtbl[i].vol_type;
name_len = be16_to_cpu(vtbl[i].name_len);
name = &vtbl[i].name[0];
crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
if (be32_to_cpu(vtbl[i].crc) != crc) {
ubi_err(ubi, "bad CRC at record %u: %#08x, not %#08x",
i, crc, be32_to_cpu(vtbl[i].crc));
ubi_dump_vtbl_record(&vtbl[i], i);
return 1;
}
if (reserved_pebs == 0) {
if (memcmp(&vtbl[i], &empty_vtbl_record,
UBI_VTBL_RECORD_SIZE)) {
err = 2;
goto bad;
}
continue;
}
if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 ||
name_len < 0) {
err = 3;
goto bad;
}
if (alignment > ubi->leb_size || alignment == 0) {
err = 4;
goto bad;
}
n = alignment & (ubi->min_io_size - 1);
if (alignment != 1 && n) {
err = 5;
goto bad;
}
n = ubi->leb_size % alignment;
if (data_pad != n) {
ubi_err(ubi, "bad data_pad, has to be %d", n);
err = 6;
goto bad;
}
if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
err = 7;
goto bad;
}
if (upd_marker != 0 && upd_marker != 1) {
err = 8;
goto bad;
}
if (reserved_pebs > ubi->good_peb_count) {
ubi_err(ubi, "too large reserved_pebs %d, good PEBs %d",
reserved_pebs, ubi->good_peb_count);
err = 9;
goto bad;
}
if (name_len > UBI_VOL_NAME_MAX) {
err = 10;
goto bad;
}
if (name[0] == '\0') {
err = 11;
goto bad;
}
if (name_len != strnlen(name, name_len + 1)) {
err = 12;
goto bad;
}
}
/* Check that all names are unique */
for (i = 0; i < ubi->vtbl_slots - 1; i++) {
for (n = i + 1; n < ubi->vtbl_slots; n++) {
int len1 = be16_to_cpu(vtbl[i].name_len);
int len2 = be16_to_cpu(vtbl[n].name_len);
if (len1 > 0 && len1 == len2 &&
!strncmp(vtbl[i].name, vtbl[n].name, len1)) {
ubi_err(ubi, "volumes %d and %d have the same name \"%s\"",
i, n, vtbl[i].name);
ubi_dump_vtbl_record(&vtbl[i], i);
ubi_dump_vtbl_record(&vtbl[n], n);
return -EINVAL;
}
}
}
return 0;
bad:
ubi_err(ubi, "volume table check failed: record %d, error %d", i, err);
ubi_dump_vtbl_record(&vtbl[i], i);
return -EINVAL;
}
/**
* create_vtbl - create a copy of volume table.
* @ubi: UBI device description object
* @ai: attaching information
* @copy: number of the volume table copy
* @vtbl: contents of the volume table
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
int copy, void *vtbl)
{
int err, tries = 0;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
struct ubi_ainf_peb *new_aeb;
dbg_gen("create volume table (copy #%d)", copy + 1);
vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!vidb)
return -ENOMEM;
vid_hdr = ubi_get_vid_hdr(vidb);
retry:
new_aeb = ubi_early_get_peb(ubi, ai);
if (IS_ERR(new_aeb)) {
err = PTR_ERR(new_aeb);
goto out_free;
}
vid_hdr->vol_type = UBI_LAYOUT_VOLUME_TYPE;
vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
vid_hdr->data_size = vid_hdr->used_ebs =
vid_hdr->data_pad = cpu_to_be32(0);
vid_hdr->lnum = cpu_to_be32(copy);
vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum);
/* The EC header is already there, write the VID header */
err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vidb);
if (err)
goto write_error;
/* Write the layout volume contents */
err = ubi_io_write_data(ubi, vtbl, new_aeb->pnum, 0, ubi->vtbl_size);
if (err)
goto write_error;
/*
* And add it to the attaching information. Don't delete the old version
* of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
*/
err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
ubi_free_aeb(ai, new_aeb);
ubi_free_vid_buf(vidb);
return err;
write_error:
if (err == -EIO && ++tries <= 5) {
/*
* Probably this physical eraseblock went bad, try to pick
* another one.
*/
list_add(&new_aeb->u.list, &ai->erase);
goto retry;
}
ubi_free_aeb(ai, new_aeb);
out_free:
ubi_free_vid_buf(vidb);
return err;
}
/**
* process_lvol - process the layout volume.
* @ubi: UBI device description object
* @ai: attaching information
* @av: layout volume attaching information
*
* This function is responsible for reading the layout volume, ensuring it is
* not corrupted, and recovering from corruptions if needed. Returns volume
* table in case of success and a negative error code in case of failure.
*/
static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
struct ubi_attach_info *ai,
struct ubi_ainf_volume *av)
{
int err;
struct rb_node *rb;
struct ubi_ainf_peb *aeb;
struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL };
int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1};
/*
* UBI goes through the following steps when it changes the layout
* volume:
* a. erase LEB 0;
* b. write new data to LEB 0;
* c. erase LEB 1;
* d. write new data to LEB 1.
*
* Before the change, both LEBs contain the same data.
*
* Due to unclean reboots, the contents of LEB 0 may be lost, but there
 * should be LEB 1. So it is OK if LEB 0 is corrupted while LEB 1 is not.
* Similarly, LEB 1 may be lost, but there should be LEB 0. And
* finally, unclean reboots may result in a situation when neither LEB
* 0 nor LEB 1 are corrupted, but they are different. In this case, LEB
* 0 contains more recent information.
*
* So the plan is to first check LEB 0. Then
 * a. if LEB 0 is OK, it must contain the most recent data; then
* we compare it with LEB 1, and if they are different, we copy LEB
* 0 to LEB 1;
 * b. if LEB 0 is corrupted, LEB 1 has to be OK, and we then copy LEB 1
* to LEB 0.
*/
dbg_gen("check layout volume");
/* Read both LEB 0 and LEB 1 into memory */
ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
leb[aeb->lnum] = vzalloc(ubi->vtbl_size);
if (!leb[aeb->lnum]) {
err = -ENOMEM;
goto out_free;
}
err = ubi_io_read_data(ubi, leb[aeb->lnum], aeb->pnum, 0,
ubi->vtbl_size);
if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err))
/*
* Scrub the PEB later. Note, -EBADMSG indicates an
* uncorrectable ECC error, but we have our own CRC and
* the data will be checked later. If the data is OK,
* the PEB will be scrubbed (because we set
* aeb->scrub). If the data is not OK, the contents of
* the PEB will be recovered from the second copy, and
* aeb->scrub will be cleared in
* 'ubi_add_to_av()'.
*/
aeb->scrub = 1;
else if (err)
goto out_free;
}
err = -EINVAL;
if (leb[0]) {
leb_corrupted[0] = vtbl_check(ubi, leb[0]);
if (leb_corrupted[0] < 0)
goto out_free;
}
if (!leb_corrupted[0]) {
/* LEB 0 is OK */
if (leb[1])
leb_corrupted[1] = memcmp(leb[0], leb[1],
ubi->vtbl_size);
if (leb_corrupted[1]) {
ubi_warn(ubi, "volume table copy #2 is corrupted");
err = create_vtbl(ubi, ai, 1, leb[0]);
if (err)
goto out_free;
ubi_msg(ubi, "volume table was restored");
}
/* Both LEB 0 and LEB 1 are OK and consistent */
vfree(leb[1]);
return leb[0];
} else {
/* LEB 0 is corrupted or does not exist */
if (leb[1]) {
leb_corrupted[1] = vtbl_check(ubi, leb[1]);
if (leb_corrupted[1] < 0)
goto out_free;
}
if (leb_corrupted[1]) {
/* Both LEB 0 and LEB 1 are corrupted */
ubi_err(ubi, "both volume tables are corrupted");
goto out_free;
}
ubi_warn(ubi, "volume table copy #1 is corrupted");
err = create_vtbl(ubi, ai, 0, leb[1]);
if (err)
goto out_free;
ubi_msg(ubi, "volume table was restored");
vfree(leb[0]);
return leb[1];
}
out_free:
vfree(leb[0]);
vfree(leb[1]);
return ERR_PTR(err);
}
/**
* create_empty_lvol - create empty layout volume.
* @ubi: UBI device description object
* @ai: attaching information
*
* This function returns volume table contents in case of success and a
* negative error code in case of failure.
*/
static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
struct ubi_attach_info *ai)
{
int i;
struct ubi_vtbl_record *vtbl;
vtbl = vzalloc(ubi->vtbl_size);
if (!vtbl)
return ERR_PTR(-ENOMEM);
for (i = 0; i < ubi->vtbl_slots; i++)
memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);
for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
int err;
err = create_vtbl(ubi, ai, i, vtbl);
if (err) {
vfree(vtbl);
return ERR_PTR(err);
}
}
return vtbl;
}
/**
* init_volumes - initialize volume information for existing volumes.
* @ubi: UBI device description object
 * @ai: attaching information
* @vtbl: volume table
*
* This function allocates volume description objects for existing volumes.
* Returns zero in case of success and a negative error code in case of
* failure.
*/
static int init_volumes(struct ubi_device *ubi,
const struct ubi_attach_info *ai,
const struct ubi_vtbl_record *vtbl)
{
int i, err, reserved_pebs = 0;
struct ubi_ainf_volume *av;
struct ubi_volume *vol;
for (i = 0; i < ubi->vtbl_slots; i++) {
cond_resched();
if (be32_to_cpu(vtbl[i].reserved_pebs) == 0)
continue; /* Empty record */
vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
if (!vol)
return -ENOMEM;
vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
vol->alignment = be32_to_cpu(vtbl[i].alignment);
vol->data_pad = be32_to_cpu(vtbl[i].data_pad);
vol->upd_marker = vtbl[i].upd_marker;
vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
vol->name_len = be16_to_cpu(vtbl[i].name_len);
vol->usable_leb_size = ubi->leb_size - vol->data_pad;
memcpy(vol->name, vtbl[i].name, vol->name_len);
vol->name[vol->name_len] = '\0';
vol->vol_id = i;
if (vtbl[i].flags & UBI_VTBL_SKIP_CRC_CHECK_FLG)
vol->skip_check = 1;
if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
/* Auto re-size flag may be set only for one volume */
if (ubi->autoresize_vol_id != -1) {
ubi_err(ubi, "more than one auto-resize volume (%d and %d)",
ubi->autoresize_vol_id, i);
kfree(vol);
return -EINVAL;
}
ubi->autoresize_vol_id = i;
}
ubi_assert(!ubi->volumes[i]);
ubi->volumes[i] = vol;
ubi->vol_count += 1;
vol->ubi = ubi;
reserved_pebs += vol->reserved_pebs;
/*
* We use ubi->peb_count and not vol->reserved_pebs because
* we want to keep the code simple. Otherwise we'd have to
* resize/check the bitmap upon volume resize too.
* Allocating a few bytes more does not hurt.
*/
err = ubi_fastmap_init_checkmap(vol, ubi->peb_count);
if (err)
return err;
/*
 * In case of a dynamic volume, UBI knows nothing about how much
 * data is stored there. So assume the whole volume is used.
*/
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = vol->reserved_pebs;
vol->last_eb_bytes = vol->usable_leb_size;
vol->used_bytes =
(long long)vol->used_ebs * vol->usable_leb_size;
continue;
}
/* Static volumes only */
av = ubi_find_av(ai, i);
if (!av || !av->leb_count) {
/*
* No eraseblocks belonging to this volume found. We
* don't actually know whether this static volume is
* completely corrupted or just contains no data. And
* we cannot know this as long as data size is not
* stored on flash. So we just assume the volume is
* empty. FIXME: this should be handled.
*/
continue;
}
if (av->leb_count != av->used_ebs) {
/*
* We found a static volume which misses several
* eraseblocks. Treat it as corrupted.
*/
ubi_warn(ubi, "static volume %d misses %d LEBs - corrupted",
av->vol_id, av->used_ebs - av->leb_count);
vol->corrupted = 1;
continue;
}
vol->used_ebs = av->used_ebs;
vol->used_bytes =
(long long)(vol->used_ebs - 1) * vol->usable_leb_size;
vol->used_bytes += av->last_data_size;
vol->last_eb_bytes = av->last_data_size;
}
/* And add the layout volume */
vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
if (!vol)
return -ENOMEM;
vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS;
vol->alignment = UBI_LAYOUT_VOLUME_ALIGN;
vol->vol_type = UBI_DYNAMIC_VOLUME;
vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1;
memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1);
vol->usable_leb_size = ubi->leb_size;
vol->used_ebs = vol->reserved_pebs;
vol->last_eb_bytes = vol->reserved_pebs;
vol->used_bytes =
(long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
vol->vol_id = UBI_LAYOUT_VOLUME_ID;
vol->ref_count = 1;
ubi_assert(!ubi->volumes[i]);
ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
reserved_pebs += vol->reserved_pebs;
ubi->vol_count += 1;
vol->ubi = ubi;
err = ubi_fastmap_init_checkmap(vol, UBI_LAYOUT_VOLUME_EBS);
if (err)
return err;
if (reserved_pebs > ubi->avail_pebs) {
ubi_err(ubi, "not enough PEBs, required %d, available %d",
reserved_pebs, ubi->avail_pebs);
if (ubi->corr_peb_count)
ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
return -ENOSPC;
}
ubi->rsvd_pebs += reserved_pebs;
ubi->avail_pebs -= reserved_pebs;
return 0;
}
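/*
 * Illustrative accounting (hypothetical numbers) for init_volumes(): with
 * 1000 good PEBs of which 4 are corrupted, and volumes reserving 900 PEBs
 * plus the 2-LEB layout volume, avail_pebs goes from 996 down to 94 and
 * rsvd_pebs grows by 902.
 */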
/**
* check_av - check volume attaching information.
* @vol: UBI volume description object
* @av: volume attaching information
*
* This function returns zero if the volume attaching information is consistent
 * with the data read from the volume table, and %-EINVAL if not.
*/
static int check_av(const struct ubi_volume *vol,
const struct ubi_ainf_volume *av)
{
int err;
if (av->highest_lnum >= vol->reserved_pebs) {
err = 1;
goto bad;
}
if (av->leb_count > vol->reserved_pebs) {
err = 2;
goto bad;
}
if (av->vol_type != vol->vol_type) {
err = 3;
goto bad;
}
if (av->used_ebs > vol->reserved_pebs) {
err = 4;
goto bad;
}
if (av->data_pad != vol->data_pad) {
err = 5;
goto bad;
}
return 0;
bad:
ubi_err(vol->ubi, "bad attaching information, error %d", err);
ubi_dump_av(av);
ubi_dump_vol_info(vol);
return -EINVAL;
}
/**
 * check_attaching_info - check the attaching information.
* @ubi: UBI device description object
* @ai: attaching information
*
* Even though we protect on-flash data by CRC checksums, we still don't trust
 * the media. This function ensures that the attaching information is consistent
 * with the information read from the volume table. Returns zero if the attaching
* information is OK and %-EINVAL if it is not.
*/
static int check_attaching_info(const struct ubi_device *ubi,
struct ubi_attach_info *ai)
{
int err, i;
struct ubi_ainf_volume *av;
struct ubi_volume *vol;
if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
ubi_err(ubi, "found %d volumes while attaching, maximum is %d + %d",
ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
return -EINVAL;
}
if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
ai->highest_vol_id < UBI_INTERNAL_VOL_START) {
ubi_err(ubi, "too large volume ID %d found",
ai->highest_vol_id);
return -EINVAL;
}
for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
cond_resched();
av = ubi_find_av(ai, i);
vol = ubi->volumes[i];
if (!vol) {
if (av)
ubi_remove_av(ai, av);
continue;
}
if (vol->reserved_pebs == 0) {
ubi_assert(i < ubi->vtbl_slots);
if (!av)
continue;
/*
* During attaching we found a volume which does not
* exist according to the information in the volume
* table. This must have happened due to an unclean
* reboot while the volume was being removed. Discard
* these eraseblocks.
*/
ubi_msg(ubi, "finish volume %d removal", av->vol_id);
ubi_remove_av(ai, av);
} else if (av) {
err = check_av(vol, av);
if (err)
return err;
}
}
return 0;
}
/**
* ubi_read_volume_table - read the volume table.
* @ubi: UBI device description object
* @ai: attaching information
*
 * This function reads the volume table, checks it, recovers from errors if needed,
* or creates it if needed. Returns zero in case of success and a negative
* error code in case of failure.
*/
int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int err;
struct ubi_ainf_volume *av;
empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
/*
* The number of supported volumes is limited by the eraseblock size
* and by the UBI_MAX_VOLUMES constant.
*/
ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
ubi->vtbl_slots = UBI_MAX_VOLUMES;
ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
av = ubi_find_av(ai, UBI_LAYOUT_VOLUME_ID);
if (!av) {
/*
* No logical eraseblocks belonging to the layout volume were
* found. This could mean that the flash is just empty. In
 * this case we create an empty layout volume.
*
 * But if the flash is not empty, this must be a corruption or the
* MTD device just contains garbage.
*/
if (ai->is_empty) {
ubi->vtbl = create_empty_lvol(ubi, ai);
if (IS_ERR(ubi->vtbl))
return PTR_ERR(ubi->vtbl);
} else {
ubi_err(ubi, "the layout volume was not found");
return -EINVAL;
}
} else {
if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) {
/* This must not happen with proper UBI images */
ubi_err(ubi, "too many LEBs (%d) in layout volume",
av->leb_count);
return -EINVAL;
}
ubi->vtbl = process_lvol(ubi, ai, av);
if (IS_ERR(ubi->vtbl))
return PTR_ERR(ubi->vtbl);
}
ubi->avail_pebs = ubi->good_peb_count - ubi->corr_peb_count;
/*
* The layout volume is OK, initialize the corresponding in-RAM data
* structures.
*/
err = init_volumes(ubi, ai, ubi->vtbl);
if (err)
goto out_free;
/*
* Make sure that the attaching information is consistent to the
* information stored in the volume table.
*/
err = check_attaching_info(ubi, ai);
if (err)
goto out_free;
return 0;
out_free:
vfree(ubi->vtbl);
ubi_free_all_volumes(ubi);
return err;
}
/**
* self_vtbl_check - check volume table.
* @ubi: UBI device description object
*/
static void self_vtbl_check(const struct ubi_device *ubi)
{
if (!ubi_dbg_chk_gen(ubi))
return;
if (vtbl_check(ubi, ubi->vtbl)) {
ubi_err(ubi, "self-check failed");
BUG();
}
}
| linux-master | drivers/mtd/ubi/vtbl.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) International Business Machines Corp., 2006
*
* Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
*/
/*
* UBI wear-leveling sub-system.
*
* This sub-system is responsible for wear-leveling. It works in terms of
* physical eraseblocks and erase counters and knows nothing about logical
* eraseblocks, volumes, etc. From this sub-system's perspective all physical
* eraseblocks are of two types - used and free. Used physical eraseblocks are
* those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
* eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
*
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase counter
* header. The rest of the physical eraseblock contains only %0xFF bytes.
*
* When physical eraseblocks are returned to the WL sub-system by means of the
* 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
* done asynchronously in context of the per-UBI device background thread,
* which is also managed by the WL sub-system.
*
* The wear-leveling is ensured by means of moving the contents of used
* physical eraseblocks with low erase counter to free physical eraseblocks
* with high erase counter.
*
* If the WL sub-system fails to erase a physical eraseblock, it marks it as
* bad.
*
* This sub-system is also responsible for scrubbing. If a bit-flip is detected
* in a physical eraseblock, it has to be moved. Technically this is the same
* as moving it for wear-leveling reasons.
*
* As it was said, for the UBI sub-system all physical eraseblocks are either
* "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
* used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
* RB-trees, as well as (temporarily) in the @wl->pq queue.
*
* When the WL sub-system returns a physical eraseblock, the physical
* eraseblock is protected from being moved for some "time". For this reason,
* the physical eraseblock is not directly moved from the @wl->free tree to the
* @wl->used tree. There is a protection queue in between where this
* physical eraseblock is temporarily stored (@wl->pq).
*
* All this protection stuff is needed because:
* o we don't want to move physical eraseblocks just after we have given them
* to the user; instead, we first want to let users fill them up with data;
*
* o there is a chance that the user will put the physical eraseblock very
* soon, so it makes sense not to move it for some time, but wait.
*
 * Physical eraseblocks stay protected only for a limited time. But the "time" is
* measured in erase cycles in this case. This is implemented with help of the
* protection queue. Eraseblocks are put to the tail of this queue when they
* are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
* head of the queue on each erase operation (for any eraseblock). So the
 * length of the queue defines for how many (global) erase cycles PEBs are protected.
*
* To put it differently, each physical eraseblock has 2 main states: free and
* used. The former state corresponds to the @wl->free tree. The latter state
 * is split up into several sub-states:
* o the WL movement is allowed (@wl->used tree);
* o the WL movement is disallowed (@wl->erroneous) because the PEB is
* erroneous - e.g., there was a read error;
* o the WL movement is temporarily prohibited (@wl->pq queue);
* o scrubbing is needed (@wl->scrub tree).
*
* Depending on the sub-state, wear-leveling entries of the used physical
* eraseblocks may be kept in one of those structures.
*
* Note, in this implementation, we keep a small in-RAM object for each physical
* eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In the future, one may
* re-work this sub-system and make it more scalable.
*
* At the moment this sub-system does not utilize the sequence number, which
* was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with a low erase counter, and we need to pick the
 * target PEB, we could pick a PEB with the highest EC if our PEB is "old" and
 * pick a target PEB with an average EC if our PEB is not very "old". This
 * leaves room for future re-work of the WL sub-system.
*/
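/*
 * Illustrative (hypothetical) numbers: with UBI_WL_THRESHOLD == 4096, a
 * used PEB with EC 100 becomes a wear-leveling candidate once a free PEB
 * with EC >= 4196 exists; its contents are copied to the worn-out PEB,
 * and the young PEB is erased and rejoins the free pool, which evens out
 * the erase counters over time.
 */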
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"
#include "wl.h"
/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1
/*
* Maximum difference between two erase counters. If this threshold is
* exceeded, the WL sub-system starts moving data from used physical
* eraseblocks with low erase counter to free physical eraseblocks with high
* erase counter.
*/
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
/*
* When a physical eraseblock is moved, the WL sub-system has to pick the target
* physical eraseblock to move to. The simplest way would be just to pick the
* one with the highest erase counter. But in certain workloads this could lead
 * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
* situation when the picked physical eraseblock is constantly erased after the
* data is written to it. So, we have a constant which limits the highest erase
* counter of the free physical eraseblock to pick. Namely, the WL sub-system
* does not pick eraseblocks with erase counter greater than the lowest erase
* counter plus %WL_FREE_MAX_DIFF.
*/
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
/*
* Maximum number of consecutive background thread failures which is enough to
* switch to read-only mode.
*/
#define WL_MAX_FAILURES 32
static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
struct ubi_wl_entry *e);
/**
* wl_tree_add - add a wear-leveling entry to a WL RB-tree.
* @e: the wear-leveling entry to add
* @root: the root of the tree
*
* Note, we use (erase counter, physical eraseblock number) pairs as keys in
* the @ubi->used and @ubi->free RB-trees.
*/
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
struct rb_node **p, *parent = NULL;
p = &root->rb_node;
while (*p) {
struct ubi_wl_entry *e1;
parent = *p;
e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
if (e->ec < e1->ec)
p = &(*p)->rb_left;
else if (e->ec > e1->ec)
p = &(*p)->rb_right;
else {
ubi_assert(e->pnum != e1->pnum);
if (e->pnum < e1->pnum)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
}
rb_link_node(&e->u.rb, parent, p);
rb_insert_color(&e->u.rb, root);
}
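/*
 * Example key ordering (hypothetical entries): (EC 2, PEB 7) < (EC 3,
 * PEB 1) < (EC 3, PEB 5). Ties on the erase counter are broken by the
 * PEB number, which keeps the keys unique since each PEB appears in a
 * tree at most once.
 */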
/**
* wl_entry_destroy - destroy a wear-leveling entry.
* @ubi: UBI device description object
 * @e: the wear-leveling entry to destroy
*
* This function destroys a wear leveling entry and removes
* the reference from the lookup table.
*/
static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
ubi->lookuptbl[e->pnum] = NULL;
kmem_cache_free(ubi_wl_entry_slab, e);
}
/**
* do_work - do one pending work.
* @ubi: UBI device description object
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
static int do_work(struct ubi_device *ubi)
{
int err;
struct ubi_work *wrk;
cond_resched();
/*
* @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing work at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes the semaphore in write mode.
*/
down_read(&ubi->work_sem);
spin_lock(&ubi->wl_lock);
if (list_empty(&ubi->works)) {
spin_unlock(&ubi->wl_lock);
up_read(&ubi->work_sem);
return 0;
}
wrk = list_entry(ubi->works.next, struct ubi_work, list);
list_del(&wrk->list);
ubi->works_count -= 1;
ubi_assert(ubi->works_count >= 0);
spin_unlock(&ubi->wl_lock);
/*
* Call the worker function. Do not touch the work structure
* after this call as it will have been freed or reused by that
* time by the worker function.
*/
err = wrk->func(ubi, wrk, 0);
if (err)
ubi_err(ubi, "work failed with error code %d", err);
up_read(&ubi->work_sem);
return err;
}
/**
* in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
* @e: the wear-leveling entry to check
* @root: the root of the tree
*
* This function returns non-zero if @e is in the @root RB-tree and zero if it
* is not.
*/
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
struct rb_node *p;
p = root->rb_node;
while (p) {
struct ubi_wl_entry *e1;
e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
if (e->pnum == e1->pnum) {
ubi_assert(e == e1);
return 1;
}
if (e->ec < e1->ec)
p = p->rb_left;
else if (e->ec > e1->ec)
p = p->rb_right;
else {
ubi_assert(e->pnum != e1->pnum);
if (e->pnum < e1->pnum)
p = p->rb_left;
else
p = p->rb_right;
}
}
return 0;
}
/**
* in_pq - check if a wear-leveling entry is present in the protection queue.
* @ubi: UBI device description object
* @e: the wear-leveling entry to check
*
* This function returns non-zero if @e is in the protection queue and zero
* if it is not.
*/
static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
{
struct ubi_wl_entry *p;
int i;
for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
list_for_each_entry(p, &ubi->pq[i], u.list)
if (p == e)
return 1;
return 0;
}
/**
* prot_queue_add - add physical eraseblock to the protection queue.
* @ubi: UBI device description object
* @e: the physical eraseblock to add
*
* This function adds @e to the tail of the protection queue @ubi->pq, where
* @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
* temporarily protected from the wear-leveling worker. Note, @wl->lock has to
* be locked.
*/
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
int pq_tail = ubi->pq_head - 1;
if (pq_tail < 0)
pq_tail = UBI_PROT_QUEUE_LEN - 1;
ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
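/*
 * Queue arithmetic sketch (assuming UBI_PROT_QUEUE_LEN == 10): with
 * pq_head == 0, a fresh PEB lands in pq[9], the slot that will be served
 * last; each serve_prot_queue() call drains pq[pq_head] and advances it,
 * so the PEB stays protected for roughly 10 global erase operations.
 */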
/**
* find_wl_entry - find wear-leveling entry closest to certain erase counter.
* @ubi: UBI device description object
* @root: the RB-tree where to look for
* @diff: maximum possible difference from the smallest erase counter
*
* This function looks for a wear leveling entry with erase counter closest to
* min + @diff, where min is the smallest erase counter.
*/
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
struct rb_root *root, int diff)
{
struct rb_node *p;
struct ubi_wl_entry *e;
int max;
e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
max = e->ec + diff;
p = root->rb_node;
while (p) {
struct ubi_wl_entry *e1;
e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
if (e1->ec >= max)
p = p->rb_left;
else {
p = p->rb_right;
e = e1;
}
}
return e;
}
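/*
 * Example (hypothetical tree): with erase counters {10, 12, 40, 90} and
 * @diff == 30, max == 40 and the function returns the entry with the
 * largest EC below 40, i.e. the EC 12 one.
 */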
/**
* find_mean_wl_entry - find wear-leveling entry with medium erase counter.
* @ubi: UBI device description object
* @root: the RB-tree where to look for
*
* This function looks for a wear leveling entry with medium erase counter,
 * but not greater than or equal to the lowest erase counter plus
* %WL_FREE_MAX_DIFF/2.
*/
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
struct rb_root *root)
{
struct ubi_wl_entry *e, *first, *last;
first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
/* If no fastmap has been written and this WL entry can be used
* as anchor PEB, hold it back and return the second best
* WL entry such that fastmap can use the anchor PEB later. */
e = may_reserve_for_fm(ubi, e, root);
} else
e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
return e;
}
/**
* wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
* refill_wl_user_pool().
* @ubi: UBI device description object
*
* This function returns a wear leveling entry in case of success and
* NULL in case of failure.
*/
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
{
struct ubi_wl_entry *e;
e = find_mean_wl_entry(ubi, &ubi->free);
if (!e) {
ubi_err(ubi, "no free eraseblocks");
return NULL;
}
self_check_in_wl_tree(ubi, e, &ubi->free);
/*
* Move the physical eraseblock to the protection queue where it will
* be protected from being moved for some time.
*/
rb_erase(&e->u.rb, &ubi->free);
ubi->free_count--;
dbg_wl("PEB %d EC %d", e->pnum, e->ec);
return e;
}
/**
* prot_queue_del - remove a physical eraseblock from the protection queue.
* @ubi: UBI device description object
* @pnum: the physical eraseblock to remove
*
* This function deletes PEB @pnum from the protection queue and returns zero
* in case of success and %-ENODEV if the PEB was not found.
*/
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
struct ubi_wl_entry *e;
e = ubi->lookuptbl[pnum];
if (!e)
return -ENODEV;
if (self_check_in_pq(ubi, e))
return -ENODEV;
list_del(&e->u.list);
dbg_wl("deleted PEB %d from the protection queue", e->pnum);
return 0;
}
/**
* sync_erase - synchronously erase a physical eraseblock.
* @ubi: UBI device description object
* @e: the physical eraseblock to erase
* @torture: if the physical eraseblock has to be tortured
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
int torture)
{
int err;
struct ubi_ec_hdr *ec_hdr;
unsigned long long ec = e->ec;
dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
err = self_check_ec(ubi, e->pnum, e->ec);
if (err)
return -EINVAL;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
if (!ec_hdr)
return -ENOMEM;
err = ubi_io_sync_erase(ubi, e->pnum, torture);
if (err < 0)
goto out_free;
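	/* ubi_io_sync_erase() returns the number of erasures performed */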
ec += err;
if (ec > UBI_MAX_ERASECOUNTER) {
/*
* Erase counter overflow. Upgrade UBI and use 64-bit
* erase counters internally.
*/
ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
e->pnum, ec);
err = -EINVAL;
goto out_free;
}
dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
ec_hdr->ec = cpu_to_be64(ec);
err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
if (err)
goto out_free;
e->ec = ec;
spin_lock(&ubi->wl_lock);
if (e->ec > ubi->max_ec)
ubi->max_ec = e->ec;
spin_unlock(&ubi->wl_lock);
out_free:
kfree(ec_hdr);
return err;
}
/**
* serve_prot_queue - check if it is time to stop protecting PEBs.
* @ubi: UBI device description object
*
* This function is called after each erase operation and removes PEBs from the
 * head of the protection queue. These PEBs have been protected for long enough
* and should be moved to the used tree.
*/
static void serve_prot_queue(struct ubi_device *ubi)
{
struct ubi_wl_entry *e, *tmp;
int count;
/*
	 * There may be several protected physical eraseblocks to remove;
* process them all.
*/
repeat:
count = 0;
spin_lock(&ubi->wl_lock);
list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
dbg_wl("PEB %d EC %d protection over, move to used tree",
e->pnum, e->ec);
list_del(&e->u.list);
wl_tree_add(e, &ubi->used);
if (count++ > 32) {
/*
* Let's be nice and avoid holding the spinlock for
* too long.
*/
spin_unlock(&ubi->wl_lock);
cond_resched();
goto repeat;
}
}
ubi->pq_head += 1;
if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
ubi->pq_head = 0;
ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
spin_unlock(&ubi->wl_lock);
}
/**
* __schedule_ubi_work - schedule a work.
* @ubi: UBI device description object
* @wrk: the work to schedule
*
* This function adds a work defined by @wrk to the tail of the pending works
* list. Can only be used if ubi->work_sem is already held in read mode!
*/
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
spin_lock(&ubi->wl_lock);
list_add_tail(&wrk->list, &ubi->works);
ubi_assert(ubi->works_count >= 0);
ubi->works_count += 1;
if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
}
/**
* schedule_ubi_work - schedule a work.
* @ubi: UBI device description object
* @wrk: the work to schedule
*
* This function adds a work defined by @wrk to the tail of the pending works
* list.
*/
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
down_read(&ubi->work_sem);
__schedule_ubi_work(ubi, wrk);
up_read(&ubi->work_sem);
}
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int shutdown);
/**
* schedule_erase - schedule an erase work.
* @ubi: UBI device description object
* @e: the WL entry of the physical eraseblock to erase
* @vol_id: the volume ID that last used this PEB
* @lnum: the last used logical eraseblock number for the PEB
* @torture: if the physical eraseblock has to be tortured
* @nested: denotes whether the work_sem is already held
*
 * This function returns zero in case of success and %-ENOMEM in case of
* failure.
*/
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
int vol_id, int lnum, int torture, bool nested)
{
struct ubi_work *wl_wrk;
ubi_assert(e);
dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
e->pnum, e->ec, torture);
wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
if (!wl_wrk)
return -ENOMEM;
wl_wrk->func = &erase_worker;
wl_wrk->e = e;
wl_wrk->vol_id = vol_id;
wl_wrk->lnum = lnum;
wl_wrk->torture = torture;
if (nested)
__schedule_ubi_work(ubi, wl_wrk);
else
schedule_ubi_work(ubi, wl_wrk);
return 0;
}
static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
/**
* do_sync_erase - run the erase worker synchronously.
* @ubi: UBI device description object
* @e: the WL entry of the physical eraseblock to erase
* @vol_id: the volume ID that last used this PEB
* @lnum: the last used logical eraseblock number for the PEB
* @torture: if the physical eraseblock has to be tortured
*
*/
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
int vol_id, int lnum, int torture)
{
struct ubi_work wl_wrk;
dbg_wl("sync erase of PEB %i", e->pnum);
wl_wrk.e = e;
wl_wrk.vol_id = vol_id;
wl_wrk.lnum = lnum;
wl_wrk.torture = torture;
return __erase_worker(ubi, &wl_wrk);
}
static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
/**
* wear_leveling_worker - wear-leveling worker function.
* @ubi: UBI device description object
* @wrk: the work object
* @shutdown: non-zero if the worker has to free memory and exit
* because the WL-subsystem is shutting down
*
* This function copies a more worn out physical eraseblock to a less worn out
* one. Returns zero in case of success and a negative error code in case of
* failure.
*/
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
int shutdown)
{
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
int erase = 0, keep = 0, vol_id = -1, lnum = -1;
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
int dst_leb_clean = 0;
kfree(wrk);
if (shutdown)
return 0;
vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!vidb)
return -ENOMEM;
vid_hdr = ubi_get_vid_hdr(vidb);
down_read(&ubi->fm_eba_sem);
mutex_lock(&ubi->move_mutex);
spin_lock(&ubi->wl_lock);
ubi_assert(!ubi->move_from && !ubi->move_to);
ubi_assert(!ubi->move_to_put);
#ifdef CONFIG_MTD_UBI_FASTMAP
if (!next_peb_for_wl(ubi) ||
#else
if (!ubi->free.rb_node ||
#endif
(!ubi->used.rb_node && !ubi->scrub.rb_node)) {
/*
* No free physical eraseblocks? Well, they must be waiting in
* the queue to be erased. Cancel movement - it will be
* triggered again when a free physical eraseblock appears.
*
* No used physical eraseblocks? They must be temporarily
* protected from being moved. They will be moved to the
* @ubi->used tree later and the wear-leveling will be
* triggered again.
*/
dbg_wl("cancel WL, a list is empty: free %d, used %d",
!ubi->free.rb_node, !ubi->used.rb_node);
goto out_cancel;
}
#ifdef CONFIG_MTD_UBI_FASTMAP
e1 = find_anchor_wl_entry(&ubi->used);
if (e1 && ubi->fm_anchor &&
(ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
ubi->fm_do_produce_anchor = 1;
/*
* fm_anchor is no longer considered a good anchor.
* NULL assignment also prevents multiple wear level checks
* of this PEB.
*/
wl_tree_add(ubi->fm_anchor, &ubi->free);
ubi->fm_anchor = NULL;
ubi->free_count++;
}
if (ubi->fm_do_produce_anchor) {
if (!e1)
goto out_cancel;
e2 = get_peb_for_wl(ubi);
if (!e2)
goto out_cancel;
self_check_in_wl_tree(ubi, e1, &ubi->used);
rb_erase(&e1->u.rb, &ubi->used);
dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
ubi->fm_do_produce_anchor = 0;
} else if (!ubi->scrub.rb_node) {
#else
if (!ubi->scrub.rb_node) {
#endif
/*
* Now pick the least worn-out used physical eraseblock and a
* highly worn-out free physical eraseblock. If the erase
		 * counters differ enough, start wear-leveling.
*/
e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
e2 = get_peb_for_wl(ubi);
if (!e2)
goto out_cancel;
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
dbg_wl("no WL needed: min used EC %d, max free EC %d",
e1->ec, e2->ec);
/* Give the unused PEB back */
wl_tree_add(e2, &ubi->free);
ubi->free_count++;
goto out_cancel;
}
self_check_in_wl_tree(ubi, e1, &ubi->used);
rb_erase(&e1->u.rb, &ubi->used);
dbg_wl("move PEB %d EC %d to PEB %d EC %d",
e1->pnum, e1->ec, e2->pnum, e2->ec);
} else {
/* Perform scrubbing */
scrubbing = 1;
e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
e2 = get_peb_for_wl(ubi);
if (!e2)
goto out_cancel;
self_check_in_wl_tree(ubi, e1, &ubi->scrub);
rb_erase(&e1->u.rb, &ubi->scrub);
dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
}
ubi->move_from = e1;
ubi->move_to = e2;
spin_unlock(&ubi->wl_lock);
/*
* Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
* We so far do not know which logical eraseblock our physical
* eraseblock (@e1) belongs to. We have to read the volume identifier
* header first.
*
* Note, we are protected from this PEB being unmapped and erased. The
* 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
* which is being moved was unmapped.
*/
err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
if (err && err != UBI_IO_BITFLIPS) {
dst_leb_clean = 1;
if (err == UBI_IO_FF) {
/*
			 * We are trying to move a PEB without a VID header. UBI
			 * always writes VID headers shortly after the PEB was
			 * given out, so we have a situation when it has not yet
			 * had a chance to write it, because it was preempted.
			 * So add this PEB to the protection queue for now,
* because presumably more data will be written there
* (including the missing VID header), and then we'll
* move it.
*/
dbg_wl("PEB %d has no VID header", e1->pnum);
protect = 1;
goto out_not_moved;
} else if (err == UBI_IO_FF_BITFLIPS) {
/*
* The same situation as %UBI_IO_FF, but bit-flips were
* detected. It is better to schedule this PEB for
* scrubbing.
*/
dbg_wl("PEB %d has no VID header but has bit-flips",
e1->pnum);
scrubbing = 1;
goto out_not_moved;
} else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
/*
* While a full scan would detect interrupted erasures
			 * at attach time, we can face them here when attaching from
			 * a fastmap.
*/
dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
e1->pnum);
erase = 1;
goto out_not_moved;
}
ubi_err(ubi, "error %d while reading VID header from PEB %d",
err, e1->pnum);
goto out_error;
}
vol_id = be32_to_cpu(vid_hdr->vol_id);
lnum = be32_to_cpu(vid_hdr->lnum);
err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
if (err) {
if (err == MOVE_CANCEL_RACE) {
/*
* The LEB has not been moved because the volume is
* being deleted or the PEB has been put meanwhile. We
* should prevent this PEB from being selected for
* wear-leveling movement again, so put it to the
* protection queue.
*/
protect = 1;
dst_leb_clean = 1;
goto out_not_moved;
}
if (err == MOVE_RETRY) {
scrubbing = 1;
dst_leb_clean = 1;
goto out_not_moved;
}
if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
err == MOVE_TARGET_RD_ERR) {
/*
* Target PEB had bit-flips or write error - torture it.
*/
torture = 1;
keep = 1;
goto out_not_moved;
}
if (err == MOVE_SOURCE_RD_ERR) {
/*
* An error happened while reading the source PEB. Do
* not switch to R/O mode in this case, and give the
* upper layers a possibility to recover from this,
* e.g. by unmapping corresponding LEB. Instead, just
* put this PEB to the @ubi->erroneous list to prevent
* UBI from trying to move it over and over again.
*/
if (ubi->erroneous_peb_count > ubi->max_erroneous) {
ubi_err(ubi, "too many erroneous eraseblocks (%d)",
ubi->erroneous_peb_count);
goto out_error;
}
dst_leb_clean = 1;
erroneous = 1;
goto out_not_moved;
}
if (err < 0)
goto out_error;
ubi_assert(0);
}
/* The PEB has been successfully moved */
if (scrubbing)
ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
e1->pnum, vol_id, lnum, e2->pnum);
ubi_free_vid_buf(vidb);
spin_lock(&ubi->wl_lock);
if (!ubi->move_to_put) {
wl_tree_add(e2, &ubi->used);
e2 = NULL;
}
ubi->move_from = ubi->move_to = NULL;
ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
if (err) {
if (e2) {
spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e2);
spin_unlock(&ubi->wl_lock);
}
goto out_ro;
}
if (e2) {
/*
* Well, the target PEB was put meanwhile, schedule it for
* erasure.
*/
dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
e2->pnum, vol_id, lnum);
err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
if (err)
goto out_ro;
}
dbg_wl("done");
mutex_unlock(&ubi->move_mutex);
up_read(&ubi->fm_eba_sem);
return 0;
/*
 * For some reason the LEB was not moved; might be an error, might be
* something else. @e1 was not changed, so return it back. @e2 might
* have been changed, schedule it for erasure.
*/
out_not_moved:
if (vol_id != -1)
dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
e1->pnum, vol_id, lnum, e2->pnum, err);
else
dbg_wl("cancel moving PEB %d to PEB %d (%d)",
e1->pnum, e2->pnum, err);
spin_lock(&ubi->wl_lock);
if (protect)
prot_queue_add(ubi, e1);
else if (erroneous) {
wl_tree_add(e1, &ubi->erroneous);
ubi->erroneous_peb_count += 1;
} else if (scrubbing)
wl_tree_add(e1, &ubi->scrub);
else if (keep)
wl_tree_add(e1, &ubi->used);
if (dst_leb_clean) {
wl_tree_add(e2, &ubi->free);
ubi->free_count++;
}
ubi_assert(!ubi->move_to_put);
ubi->move_from = ubi->move_to = NULL;
ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
ubi_free_vid_buf(vidb);
if (dst_leb_clean) {
ensure_wear_leveling(ubi, 1);
} else {
err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
if (err)
goto out_ro;
}
if (erase) {
err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
if (err)
goto out_ro;
}
mutex_unlock(&ubi->move_mutex);
up_read(&ubi->fm_eba_sem);
return 0;
out_error:
if (vol_id != -1)
ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
err, e1->pnum, e2->pnum);
else
ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
err, e1->pnum, vol_id, lnum, e2->pnum);
spin_lock(&ubi->wl_lock);
ubi->move_from = ubi->move_to = NULL;
ubi->move_to_put = ubi->wl_scheduled = 0;
wl_entry_destroy(ubi, e1);
wl_entry_destroy(ubi, e2);
spin_unlock(&ubi->wl_lock);
ubi_free_vid_buf(vidb);
out_ro:
ubi_ro_mode(ubi);
mutex_unlock(&ubi->move_mutex);
up_read(&ubi->fm_eba_sem);
ubi_assert(err != 0);
return err < 0 ? err : -EIO;
out_cancel:
ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
mutex_unlock(&ubi->move_mutex);
up_read(&ubi->fm_eba_sem);
ubi_free_vid_buf(vidb);
return 0;
}
/**
* ensure_wear_leveling - schedule wear-leveling if it is needed.
* @ubi: UBI device description object
* @nested: set to non-zero if this function is called from UBI worker
*
* This function checks if it is time to start wear-leveling and schedules it
* if yes. This function returns zero in case of success and a negative error
* code in case of failure.
*/
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
int err = 0;
struct ubi_work *wrk;
spin_lock(&ubi->wl_lock);
if (ubi->wl_scheduled)
/* Wear-leveling is already in the work queue */
goto out_unlock;
/*
* If the ubi->scrub tree is not empty, scrubbing is needed, and the
* WL worker has to be scheduled anyway.
*/
if (!ubi->scrub.rb_node) {
#ifdef CONFIG_MTD_UBI_FASTMAP
if (!need_wear_leveling(ubi))
goto out_unlock;
#else
struct ubi_wl_entry *e1;
struct ubi_wl_entry *e2;
if (!ubi->used.rb_node || !ubi->free.rb_node)
/* No physical eraseblocks - no deal */
goto out_unlock;
/*
* We schedule wear-leveling only if the difference between the
* lowest erase counter of used physical eraseblocks and a high
* erase counter of free physical eraseblocks is greater than
* %UBI_WL_THRESHOLD.
*/
e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
goto out_unlock;
#endif
dbg_wl("schedule wear-leveling");
} else
dbg_wl("schedule scrubbing");
ubi->wl_scheduled = 1;
spin_unlock(&ubi->wl_lock);
wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
if (!wrk) {
err = -ENOMEM;
goto out_cancel;
}
wrk->func = &wear_leveling_worker;
if (nested)
__schedule_ubi_work(ubi, wrk);
else
schedule_ubi_work(ubi, wrk);
return err;
out_cancel:
spin_lock(&ubi->wl_lock);
ubi->wl_scheduled = 0;
out_unlock:
spin_unlock(&ubi->wl_lock);
return err;
}
/**
* __erase_worker - physical eraseblock erase worker function.
* @ubi: UBI device description object
* @wl_wrk: the work object
*
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
* needed. Returns zero in case of success and a negative error code in case of
* failure.
*/
static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
{
struct ubi_wl_entry *e = wl_wrk->e;
int pnum = e->pnum;
int vol_id = wl_wrk->vol_id;
int lnum = wl_wrk->lnum;
int err, available_consumed = 0;
dbg_wl("erase PEB %d EC %d LEB %d:%d",
pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
err = sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
spin_lock(&ubi->wl_lock);
if (!ubi->fm_disabled && !ubi->fm_anchor &&
e->pnum < UBI_FM_MAX_START) {
/*
			 * Abort anchor production; if needed, it will be
			 * re-enabled by the wear-leveling started below.
*/
ubi->fm_anchor = e;
ubi->fm_do_produce_anchor = 0;
} else {
wl_tree_add(e, &ubi->free);
ubi->free_count++;
}
spin_unlock(&ubi->wl_lock);
/*
		 * One more erase operation has happened, take care of
* protected physical eraseblocks.
*/
serve_prot_queue(ubi);
		/* And take care of wear-leveling */
err = ensure_wear_leveling(ubi, 1);
return err;
}
ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
err == -EBUSY) {
int err1;
/* Re-schedule the LEB for erasure */
err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true);
if (err1) {
spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e);
spin_unlock(&ubi->wl_lock);
err = err1;
goto out_ro;
}
return err;
}
spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e);
spin_unlock(&ubi->wl_lock);
if (err != -EIO)
/*
* If this is not %-EIO, we have no idea what to do. Scheduling
* this physical eraseblock for erasure again would cause
		 * errors again and again. Well, let's switch to R/O mode.
*/
goto out_ro;
/* It is %-EIO, the PEB went bad */
if (!ubi->bad_allowed) {
ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
goto out_ro;
}
spin_lock(&ubi->volumes_lock);
if (ubi->beb_rsvd_pebs == 0) {
if (ubi->avail_pebs == 0) {
spin_unlock(&ubi->volumes_lock);
ubi_err(ubi, "no reserved/available physical eraseblocks");
goto out_ro;
}
ubi->avail_pebs -= 1;
available_consumed = 1;
}
spin_unlock(&ubi->volumes_lock);
ubi_msg(ubi, "mark PEB %d as bad", pnum);
err = ubi_io_mark_bad(ubi, pnum);
if (err)
goto out_ro;
spin_lock(&ubi->volumes_lock);
if (ubi->beb_rsvd_pebs > 0) {
if (available_consumed) {
/*
			 * The number of reserved PEBs increased since we last
* checked.
*/
ubi->avail_pebs += 1;
available_consumed = 0;
}
ubi->beb_rsvd_pebs -= 1;
}
ubi->bad_peb_count += 1;
ubi->good_peb_count -= 1;
ubi_calculate_reserved(ubi);
if (available_consumed)
ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
else if (ubi->beb_rsvd_pebs)
ubi_msg(ubi, "%d PEBs left in the reserve",
ubi->beb_rsvd_pebs);
else
ubi_warn(ubi, "last PEB from the reserve was used");
spin_unlock(&ubi->volumes_lock);
return err;
out_ro:
if (available_consumed) {
spin_lock(&ubi->volumes_lock);
ubi->avail_pebs += 1;
spin_unlock(&ubi->volumes_lock);
}
ubi_ro_mode(ubi);
return err;
}
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int shutdown)
{
int ret;
if (shutdown) {
struct ubi_wl_entry *e = wl_wrk->e;
dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
kfree(wl_wrk);
wl_entry_destroy(ubi, e);
return 0;
}
ret = __erase_worker(ubi, wl_wrk);
kfree(wl_wrk);
return ret;
}
/**
* ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
* @ubi: UBI device description object
* @vol_id: the volume ID that last used this PEB
* @lnum: the last used logical eraseblock number for the PEB
* @pnum: physical eraseblock to return
* @torture: if this physical eraseblock has to be tortured
*
* This function is called to return physical eraseblock @pnum to the pool of
* free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred on this @pnum and it has to be tested. This function returns zero
* in case of success, and a negative error code in case of failure.
*/
int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
int pnum, int torture)
{
int err;
struct ubi_wl_entry *e;
dbg_wl("PEB %d", pnum);
ubi_assert(pnum >= 0);
ubi_assert(pnum < ubi->peb_count);
down_read(&ubi->fm_protect);
retry:
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
if (!e) {
/*
		 * This wl entry has already been removed due to an error by
		 * another process (e.g. the wear-leveling worker). That
		 * process (any except __erase_worker, which cannot run
		 * concurrently with ubi_wl_put_peb) switches UBI to R/O mode
		 * at the same time, so just ignore this wl entry.
*/
spin_unlock(&ubi->wl_lock);
up_read(&ubi->fm_protect);
return 0;
}
if (e == ubi->move_from) {
/*
* User is putting the physical eraseblock which was selected to
* be moved. It will be scheduled for erasure in the
* wear-leveling worker.
*/
dbg_wl("PEB %d is being moved, wait", pnum);
spin_unlock(&ubi->wl_lock);
/* Wait for the WL worker by taking the @ubi->move_mutex */
mutex_lock(&ubi->move_mutex);
mutex_unlock(&ubi->move_mutex);
goto retry;
} else if (e == ubi->move_to) {
/*
* User is putting the physical eraseblock which was selected
* as the target the data is moved to. It may happen if the EBA
* sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
* but the WL sub-system has not put the PEB to the "used" tree
* yet, but it is about to do this. So we just set a flag which
* will tell the WL worker that the PEB is not needed anymore
* and should be scheduled for erasure.
*/
dbg_wl("PEB %d is the target of data moving", pnum);
ubi_assert(!ubi->move_to_put);
ubi->move_to_put = 1;
spin_unlock(&ubi->wl_lock);
up_read(&ubi->fm_protect);
return 0;
} else {
if (in_wl_tree(e, &ubi->used)) {
self_check_in_wl_tree(ubi, e, &ubi->used);
rb_erase(&e->u.rb, &ubi->used);
} else if (in_wl_tree(e, &ubi->scrub)) {
self_check_in_wl_tree(ubi, e, &ubi->scrub);
rb_erase(&e->u.rb, &ubi->scrub);
} else if (in_wl_tree(e, &ubi->erroneous)) {
self_check_in_wl_tree(ubi, e, &ubi->erroneous);
rb_erase(&e->u.rb, &ubi->erroneous);
ubi->erroneous_peb_count -= 1;
ubi_assert(ubi->erroneous_peb_count >= 0);
/* Erroneous PEBs should be tortured */
torture = 1;
} else {
err = prot_queue_del(ubi, e->pnum);
if (err) {
ubi_err(ubi, "PEB %d not found", pnum);
ubi_ro_mode(ubi);
spin_unlock(&ubi->wl_lock);
up_read(&ubi->fm_protect);
return err;
}
}
}
spin_unlock(&ubi->wl_lock);
err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
if (err) {
spin_lock(&ubi->wl_lock);
wl_tree_add(e, &ubi->used);
spin_unlock(&ubi->wl_lock);
}
up_read(&ubi->fm_protect);
return err;
}
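/*
 * An illustrative caller of ubi_wl_put_peb(), e.g. from the EBA sub-system
 * after un-mapping LEB @lnum of volume @vol_id from PEB @pnum. Hypothetical
 * helper, compiled out; @torture would be non-zero only after an I/O error
 * on that PEB.
 */
#if 0
static int example_return_peb(struct ubi_device *ubi, int vol_id, int lnum,
			      int pnum)
{
	/* No torture testing needed for a cleanly unmapped PEB */
	return ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
}
#endif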
/**
* ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
* @ubi: UBI device description object
* @pnum: the physical eraseblock to schedule
*
* If a bit-flip in a physical eraseblock is detected, this physical eraseblock
* needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing which is done in the background. This function returns zero in case of
* success and a negative error code in case of failure.
*/
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
struct ubi_wl_entry *e;
ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
retry:
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
in_wl_tree(e, &ubi->erroneous)) {
spin_unlock(&ubi->wl_lock);
return 0;
}
if (e == ubi->move_to) {
/*
* This physical eraseblock was used to move data to. The data
* was moved but the PEB was not yet inserted to the proper
* tree. We should just wait a little and let the WL worker
* proceed.
*/
spin_unlock(&ubi->wl_lock);
dbg_wl("the PEB %d is not in proper tree, retry", pnum);
yield();
goto retry;
}
if (in_wl_tree(e, &ubi->used)) {
self_check_in_wl_tree(ubi, e, &ubi->used);
rb_erase(&e->u.rb, &ubi->used);
} else {
int err;
err = prot_queue_del(ubi, e->pnum);
if (err) {
ubi_err(ubi, "PEB %d not found", pnum);
ubi_ro_mode(ubi);
spin_unlock(&ubi->wl_lock);
return err;
}
}
wl_tree_add(e, &ubi->scrub);
spin_unlock(&ubi->wl_lock);
/*
* Technically scrubbing is the same as wear-leveling, so it is done
* by the WL worker.
*/
return ensure_wear_leveling(ubi, 0);
}
/**
* ubi_wl_flush - flush all pending works.
* @ubi: UBI device description object
* @vol_id: the volume id to flush for
* @lnum: the logical eraseblock number to flush for
*
* This function executes all pending works for a particular volume id /
* logical eraseblock number pair. If either value is set to %UBI_ALL, then it
* acts as a wildcard for all of the corresponding volume numbers or logical
* eraseblock numbers. It returns zero in case of success and a negative error
* code in case of failure.
*/
int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
{
int err = 0;
int found = 1;
/*
	 * Execute pending works while the queue is not empty, but not more
	 * than the number of currently pending works.
*/
dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
vol_id, lnum, ubi->works_count);
while (found) {
struct ubi_work *wrk, *tmp;
found = 0;
down_read(&ubi->work_sem);
spin_lock(&ubi->wl_lock);
list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
(lnum == UBI_ALL || wrk->lnum == lnum)) {
list_del(&wrk->list);
ubi->works_count -= 1;
ubi_assert(ubi->works_count >= 0);
spin_unlock(&ubi->wl_lock);
err = wrk->func(ubi, wrk, 0);
if (err) {
up_read(&ubi->work_sem);
return err;
}
spin_lock(&ubi->wl_lock);
found = 1;
break;
}
}
spin_unlock(&ubi->wl_lock);
up_read(&ubi->work_sem);
}
/*
* Make sure all the works which have been done in parallel are
* finished.
*/
down_write(&ubi->work_sem);
up_write(&ubi->work_sem);
return err;
}
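/*
 * A sketch of the %UBI_ALL wildcard semantics described above: flush every
 * pending work for one volume, then for the whole device. Hypothetical
 * helper, compiled out.
 */
#if 0
static int example_flush(struct ubi_device *ubi, int vol_id)
{
	int err;

	/* Only works touching @vol_id, any LEB number */
	err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
	if (err)
		return err;
	/* Every pending work on the device */
	return ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
}
#endif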
static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
if (in_wl_tree(e, &ubi->scrub))
return false;
else if (in_wl_tree(e, &ubi->erroneous))
return false;
else if (ubi->move_from == e)
return false;
else if (ubi->move_to == e)
return false;
return true;
}
/**
* ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
* @ubi: UBI device description object
* @pnum: the physical eraseblock to schedule
* @force: don't read the block, assume bitflips happened and take action.
*
 * This function reads the given eraseblock and checks if bitflips occurred.
* In case of bitflips, the eraseblock is scheduled for scrubbing.
* If scrubbing is forced with @force, the eraseblock is not read,
* but scheduled for scrubbing right away.
*
* Returns:
* %EINVAL, PEB is out of range
* %ENOENT, PEB is no longer used by UBI
* %EBUSY, PEB cannot be checked now or a check is currently running on it
* %EAGAIN, bit flips happened but scrubbing is currently not possible
* %EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
* %0, no bit flips detected
*/
int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
{
int err = 0;
struct ubi_wl_entry *e;
if (pnum < 0 || pnum >= ubi->peb_count) {
err = -EINVAL;
goto out;
}
/*
* Pause all parallel work, otherwise it can happen that the
* erase worker frees a wl entry under us.
*/
down_write(&ubi->work_sem);
/*
* Make sure that the wl entry does not change state while
* inspecting it.
*/
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
if (!e) {
spin_unlock(&ubi->wl_lock);
err = -ENOENT;
goto out_resume;
}
/*
* Does it make sense to check this PEB?
*/
if (!scrub_possible(ubi, e)) {
spin_unlock(&ubi->wl_lock);
err = -EBUSY;
goto out_resume;
}
spin_unlock(&ubi->wl_lock);
if (!force) {
mutex_lock(&ubi->buf_mutex);
err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
mutex_unlock(&ubi->buf_mutex);
}
if (force || err == UBI_IO_BITFLIPS) {
/*
* Okay, bit flip happened, let's figure out what we can do.
*/
spin_lock(&ubi->wl_lock);
/*
* Recheck. We released wl_lock, UBI might have killed the
* wl entry under us.
*/
e = ubi->lookuptbl[pnum];
if (!e) {
spin_unlock(&ubi->wl_lock);
err = -ENOENT;
goto out_resume;
}
/*
* Need to re-check state
*/
if (!scrub_possible(ubi, e)) {
spin_unlock(&ubi->wl_lock);
err = -EBUSY;
goto out_resume;
}
if (in_pq(ubi, e)) {
prot_queue_del(ubi, e->pnum);
wl_tree_add(e, &ubi->scrub);
spin_unlock(&ubi->wl_lock);
err = ensure_wear_leveling(ubi, 1);
} else if (in_wl_tree(e, &ubi->used)) {
rb_erase(&e->u.rb, &ubi->used);
wl_tree_add(e, &ubi->scrub);
spin_unlock(&ubi->wl_lock);
err = ensure_wear_leveling(ubi, 1);
} else if (in_wl_tree(e, &ubi->free)) {
rb_erase(&e->u.rb, &ubi->free);
ubi->free_count--;
spin_unlock(&ubi->wl_lock);
/*
			 * This PEB is empty, so we can schedule it for
* erasure right away. No wear leveling needed.
*/
err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
force ? 0 : 1, true);
} else {
spin_unlock(&ubi->wl_lock);
err = -EAGAIN;
}
if (!err && !force)
err = -EUCLEAN;
} else {
err = 0;
}
out_resume:
up_write(&ubi->work_sem);
out:
return err;
}
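/*
 * A sketch decoding the ubi_bitflip_check() return codes listed in the
 * kernel-doc above. Hypothetical caller, compiled out.
 */
#if 0
static void example_check_bitflips(struct ubi_device *ubi, int pnum)
{
	int err = ubi_bitflip_check(ubi, pnum, 0);

	switch (err) {
	case 0:		/* no bit-flips detected */
	case -EUCLEAN:	/* bit-flips found, PEB scheduled for scrubbing */
		break;
	case -EAGAIN:	/* bit-flips found, but scrubbing impossible now */
	case -EBUSY:	/* PEB busy or already being checked, retry later */
		break;
	default:	/* -EINVAL or -ENOENT, nothing to do for this PEB */
		break;
	}
}
#endif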
/**
* tree_destroy - destroy an RB-tree.
* @ubi: UBI device description object
* @root: the root of the tree to destroy
*/
static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
{
struct rb_node *rb;
struct ubi_wl_entry *e;
rb = root->rb_node;
while (rb) {
if (rb->rb_left)
rb = rb->rb_left;
else if (rb->rb_right)
rb = rb->rb_right;
else {
e = rb_entry(rb, struct ubi_wl_entry, u.rb);
rb = rb_parent(rb);
if (rb) {
if (rb->rb_left == &e->u.rb)
rb->rb_left = NULL;
else
rb->rb_right = NULL;
}
wl_entry_destroy(ubi, e);
}
}
}
/**
* ubi_thread - UBI background thread.
* @u: the UBI device description object pointer
*/
int ubi_thread(void *u)
{
int failures = 0;
struct ubi_device *ubi = u;
ubi_msg(ubi, "background thread \"%s\" started, PID %d",
ubi->bgt_name, task_pid_nr(current));
set_freezable();
for (;;) {
int err;
if (kthread_should_stop())
break;
if (try_to_freeze())
continue;
spin_lock(&ubi->wl_lock);
if (list_empty(&ubi->works) || ubi->ro_mode ||
!ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&ubi->wl_lock);
/*
* Check kthread_should_stop() after we set the task
* state to guarantee that we either see the stop bit
* and exit or the task state is reset to runnable such
* that it's not scheduled out indefinitely and detects
* the stop bit at kthread_should_stop().
*/
if (kthread_should_stop()) {
set_current_state(TASK_RUNNING);
break;
}
schedule();
continue;
}
spin_unlock(&ubi->wl_lock);
err = do_work(ubi);
if (err) {
ubi_err(ubi, "%s: work failed with error code %d",
ubi->bgt_name, err);
if (failures++ > WL_MAX_FAILURES) {
/*
* Too many failures, disable the thread and
* switch to read-only mode.
*/
ubi_msg(ubi, "%s: %d consecutive failures",
ubi->bgt_name, WL_MAX_FAILURES);
ubi_ro_mode(ubi);
ubi->thread_enabled = 0;
continue;
}
} else
failures = 0;
cond_resched();
}
dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
ubi->thread_enabled = 0;
return 0;
}
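/*
 * The thread above is spawned with the common kthread API; a sketch of how
 * that typically looks (hypothetical helper, compiled out; the field name
 * @ubi->bgt_name is taken from ubi_wl_init() below):
 */
#if 0
static int example_start_bgt(struct ubi_device *ubi)
{
	struct task_struct *t = kthread_run(ubi_thread, ubi, "%s",
					    ubi->bgt_name);

	return IS_ERR(t) ? PTR_ERR(t) : 0;
}
#endif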
/**
* shutdown_work - shutdown all pending works.
* @ubi: UBI device description object
*/
static void shutdown_work(struct ubi_device *ubi)
{
while (!list_empty(&ubi->works)) {
struct ubi_work *wrk;
wrk = list_entry(ubi->works.next, struct ubi_work, list);
list_del(&wrk->list);
wrk->func(ubi, wrk, 1);
ubi->works_count -= 1;
ubi_assert(ubi->works_count >= 0);
}
}
/**
 * erase_aeb - erase a PEB given by a UBI attach info element
* @ubi: UBI device description object
* @aeb: UBI attach info PEB
* @sync: If true, erase synchronously. Otherwise schedule for erasure
*/
static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
{
struct ubi_wl_entry *e;
int err;
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
return -ENOMEM;
e->pnum = aeb->pnum;
e->ec = aeb->ec;
ubi->lookuptbl[e->pnum] = e;
if (sync) {
err = sync_erase(ubi, e, false);
if (err)
goto out_free;
wl_tree_add(e, &ubi->free);
ubi->free_count++;
} else {
err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
if (err)
goto out_free;
}
return 0;
out_free:
wl_entry_destroy(ubi, e);
return err;
}
/**
* ubi_wl_init - initialize the WL sub-system using attaching information.
* @ubi: UBI device description object
* @ai: attaching information
*
* This function returns zero in case of success, and a negative error code in
* case of failure.
*/
int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int err, i, reserved_pebs, found_pebs = 0;
struct rb_node *rb1, *rb2;
struct ubi_ainf_volume *av;
struct ubi_ainf_peb *aeb, *tmp;
struct ubi_wl_entry *e;
ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
spin_lock_init(&ubi->wl_lock);
mutex_init(&ubi->move_mutex);
init_rwsem(&ubi->work_sem);
ubi->max_ec = ai->max_ec;
INIT_LIST_HEAD(&ubi->works);
sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
err = -ENOMEM;
ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
if (!ubi->lookuptbl)
return err;
for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
INIT_LIST_HEAD(&ubi->pq[i]);
ubi->pq_head = 0;
ubi->free_count = 0;
list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
cond_resched();
err = erase_aeb(ubi, aeb, false);
if (err)
goto out_free;
found_pebs++;
}
list_for_each_entry(aeb, &ai->free, u.list) {
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e) {
err = -ENOMEM;
goto out_free;
}
e->pnum = aeb->pnum;
e->ec = aeb->ec;
ubi_assert(e->ec >= 0);
wl_tree_add(e, &ubi->free);
ubi->free_count++;
ubi->lookuptbl[e->pnum] = e;
found_pebs++;
}
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e) {
err = -ENOMEM;
goto out_free;
}
e->pnum = aeb->pnum;
e->ec = aeb->ec;
ubi->lookuptbl[e->pnum] = e;
if (!aeb->scrub) {
dbg_wl("add PEB %d EC %d to the used tree",
e->pnum, e->ec);
wl_tree_add(e, &ubi->used);
} else {
dbg_wl("add PEB %d EC %d to the scrub tree",
e->pnum, e->ec);
wl_tree_add(e, &ubi->scrub);
}
found_pebs++;
}
}
list_for_each_entry(aeb, &ai->fastmap, u.list) {
cond_resched();
e = ubi_find_fm_block(ubi, aeb->pnum);
if (e) {
ubi_assert(!ubi->lookuptbl[e->pnum]);
ubi->lookuptbl[e->pnum] = e;
} else {
bool sync = false;
/*
* Usually old Fastmap PEBs are scheduled for erasure
			 * and we don't have to care about them, but if we face
			 * a power cut before scheduling them we need to
* take care of them here.
*/
if (ubi->lookuptbl[aeb->pnum])
continue;
/*
* The fastmap update code might not find a free PEB for
* writing the fastmap anchor to and then reuses the
* current fastmap anchor PEB. When this PEB gets erased
* and a power cut happens before it is written again we
* must make sure that the fastmap attach code doesn't
* find any outdated fastmap anchors, hence we erase the
* outdated fastmap anchor PEBs synchronously here.
*/
if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
sync = true;
err = erase_aeb(ubi, aeb, sync);
if (err)
goto out_free;
}
found_pebs++;
}
dbg_wl("found %i PEBs", found_pebs);
ubi_assert(ubi->good_peb_count == found_pebs);
reserved_pebs = WL_RESERVED_PEBS;
ubi_fastmap_init(ubi, &reserved_pebs);
if (ubi->avail_pebs < reserved_pebs) {
ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
ubi->avail_pebs, reserved_pebs);
if (ubi->corr_peb_count)
ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
err = -ENOSPC;
goto out_free;
}
ubi->avail_pebs -= reserved_pebs;
ubi->rsvd_pebs += reserved_pebs;
/* Schedule wear-leveling if needed */
err = ensure_wear_leveling(ubi, 0);
if (err)
goto out_free;
#ifdef CONFIG_MTD_UBI_FASTMAP
if (!ubi->ro_mode && !ubi->fm_disabled)
ubi_ensure_anchor_pebs(ubi);
#endif
return 0;
out_free:
shutdown_work(ubi);
tree_destroy(ubi, &ubi->used);
tree_destroy(ubi, &ubi->free);
tree_destroy(ubi, &ubi->scrub);
kfree(ubi->lookuptbl);
return err;
}
/**
* protection_queue_destroy - destroy the protection queue.
* @ubi: UBI device description object
*/
static void protection_queue_destroy(struct ubi_device *ubi)
{
int i;
struct ubi_wl_entry *e, *tmp;
for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
list_del(&e->u.list);
wl_entry_destroy(ubi, e);
}
}
}
/**
* ubi_wl_close - close the wear-leveling sub-system.
* @ubi: UBI device description object
*/
void ubi_wl_close(struct ubi_device *ubi)
{
dbg_wl("close the WL sub-system");
ubi_fastmap_close(ubi);
shutdown_work(ubi);
protection_queue_destroy(ubi);
tree_destroy(ubi, &ubi->used);
tree_destroy(ubi, &ubi->erroneous);
tree_destroy(ubi, &ubi->free);
tree_destroy(ubi, &ubi->scrub);
kfree(ubi->lookuptbl);
}
/**
* self_check_ec - make sure that the erase counter of a PEB is correct.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
* @ec: the erase counter to check
*
* This function returns zero if the erase counter of physical eraseblock @pnum
* is equivalent to @ec, and a negative error code if not or if an error
* occurred.
*/
static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
int err;
long long read_ec;
struct ubi_ec_hdr *ec_hdr;
if (!ubi_dbg_chk_gen(ubi))
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
if (!ec_hdr)
return -ENOMEM;
err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
if (err && err != UBI_IO_BITFLIPS) {
/* The header does not have to exist */
err = 0;
goto out_free;
}
read_ec = be64_to_cpu(ec_hdr->ec);
if (ec != read_ec && read_ec - ec > 1) {
ubi_err(ubi, "self-check failed for PEB %d", pnum);
ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
dump_stack();
err = 1;
} else
err = 0;
out_free:
kfree(ec_hdr);
return err;
}
/**
* self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
* @ubi: UBI device description object
* @e: the wear-leveling entry to check
* @root: the root of the tree
*
* This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
* is not.
*/
static int self_check_in_wl_tree(const struct ubi_device *ubi,
struct ubi_wl_entry *e, struct rb_root *root)
{
if (!ubi_dbg_chk_gen(ubi))
return 0;
if (in_wl_tree(e, root))
return 0;
ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
e->pnum, e->ec, root);
dump_stack();
return -EINVAL;
}
/**
* self_check_in_pq - check if wear-leveling entry is in the protection
* queue.
* @ubi: UBI device description object
* @e: the wear-leveling entry to check
*
* This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
*/
static int self_check_in_pq(const struct ubi_device *ubi,
struct ubi_wl_entry *e)
{
if (!ubi_dbg_chk_gen(ubi))
return 0;
if (in_pq(ubi, e))
return 0;
ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
e->pnum, e->ec);
dump_stack();
return -EINVAL;
}
#ifndef CONFIG_MTD_UBI_FASTMAP
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
struct ubi_wl_entry *e;
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
self_check_in_wl_tree(ubi, e, &ubi->free);
ubi->free_count--;
ubi_assert(ubi->free_count >= 0);
rb_erase(&e->u.rb, &ubi->free);
return e;
}
/**
* produce_free_peb - produce a free physical eraseblock.
* @ubi: UBI device description object
*
* This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
* disabled. Returns zero in case of success and a negative error code in case
* of failure.
*/
static int produce_free_peb(struct ubi_device *ubi)
{
int err;
while (!ubi->free.rb_node && ubi->works_count) {
spin_unlock(&ubi->wl_lock);
dbg_wl("do one work synchronously");
err = do_work(ubi);
spin_lock(&ubi->wl_lock);
if (err)
return err;
}
return 0;
}
/**
* ubi_wl_get_peb - get a physical eraseblock.
* @ubi: UBI device description object
*
* This function returns a physical eraseblock in case of success and a
* negative error code in case of failure.
* Returns with ubi->fm_eba_sem held in read mode!
*/
int ubi_wl_get_peb(struct ubi_device *ubi)
{
int err;
struct ubi_wl_entry *e;
retry:
down_read(&ubi->fm_eba_sem);
spin_lock(&ubi->wl_lock);
if (!ubi->free.rb_node) {
if (ubi->works_count == 0) {
ubi_err(ubi, "no free eraseblocks");
ubi_assert(list_empty(&ubi->works));
spin_unlock(&ubi->wl_lock);
return -ENOSPC;
}
err = produce_free_peb(ubi);
if (err < 0) {
spin_unlock(&ubi->wl_lock);
return err;
}
spin_unlock(&ubi->wl_lock);
up_read(&ubi->fm_eba_sem);
goto retry;
}
e = wl_get_wle(ubi);
prot_queue_add(ubi, e);
spin_unlock(&ubi->wl_lock);
err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
ubi->peb_size - ubi->vid_hdr_aloffset);
if (err) {
ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
return err;
}
return e->pnum;
}
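/*
 * An illustrative caller of ubi_wl_get_peb(); note the function returns
 * with @ubi->fm_eba_sem read-locked (see the kernel-doc above), so the
 * caller must drop it in both the success and error cases. Hypothetical
 * helper, compiled out.
 */
#if 0
static int example_grab_peb(struct ubi_device *ubi)
{
	int pnum = ubi_wl_get_peb(ubi);

	/* ... on success, write to @pnum via the I/O sub-system ... */
	up_read(&ubi->fm_eba_sem);	/* held in success and error cases */
	return pnum;
}
#endif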
#else
#include "fastmap-wl.c"
#endif
| linux-master | drivers/mtd/ubi/wl.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) International Business Machines Corp., 2006
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/*
* UBI attaching sub-system.
*
* This sub-system is responsible for attaching MTD devices and it also
* implements flash media scanning.
*
 * The attaching information is represented by a &struct ubi_attach_info
* object. Information about volumes is represented by &struct ubi_ainf_volume
* objects which are kept in volume RB-tree with root at the @volumes field.
* The RB-tree is indexed by the volume ID.
*
* Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These
* objects are kept in per-volume RB-trees with the root at the corresponding
* &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of
* per-volume objects and each of these objects is the root of RB-tree of
* per-LEB objects.
*
* Corrupted physical eraseblocks are put to the @corr list, free physical
 * eraseblocks are put to the @free list and the physical eraseblocks to be
* erased are put to the @erase list.
*
* About corruptions
* ~~~~~~~~~~~~~~~~~
*
* UBI protects EC and VID headers with CRC-32 checksums, so it can detect
* whether the headers are corrupted or not. Sometimes UBI also protects the
* data with CRC-32, e.g., when it executes the atomic LEB change operation, or
* when it moves the contents of a PEB for wear-leveling purposes.
*
* UBI tries to distinguish between 2 types of corruptions.
*
* 1. Corruptions caused by power cuts. These are expected corruptions and UBI
* tries to handle them gracefully, without printing too many warnings and
* error messages. The idea is that we do not lose important data in these
* cases - we may lose only the data which were being written to the media just
* before the power cut happened, and the upper layers (e.g., UBIFS) are
* supposed to handle such data losses (e.g., by using the FS journal).
*
* When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
* the reason is a power cut, UBI puts this PEB to the @erase list, and all
* PEBs in the @erase list are scheduled for erasure later.
*
* 2. Unexpected corruptions which are not caused by power cuts. During
* attaching, such PEBs are put to the @corr list and UBI preserves them.
* Obviously, this lessens the amount of available PEBs, and if at some point
* UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
* about such PEBs every time the MTD device is attached.
*
* However, it is difficult to reliably distinguish between these types of
* corruptions and UBI's strategy is as follows (in case of attaching by
* scanning). UBI assumes corruption type 2 if the VID header is corrupted and
* the data area does not contain all 0xFFs, and there were no bit-flips or
* integrity errors (e.g., ECC errors in case of NAND) while reading the data
* area. Otherwise UBI assumes corruption type 1. So the decision criteria
* are as follows.
* o If the data area contains only 0xFFs, there are no data, and it is safe
* to just erase this PEB - this is corruption type 1.
* o If the data area has bit-flips or data integrity errors (ECC errors on
* NAND), it is probably a PEB which was being erased when power cut
* happened, so this is corruption type 1. However, this is just a guess,
* which might be wrong.
* o Otherwise this is corruption type 2.
*/
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/math64.h>
#include <linux/random.h>
#include "ubi.h"
static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
#define AV_FIND BIT(0)
#define AV_ADD BIT(1)
#define AV_FIND_OR_ADD (AV_FIND | AV_ADD)
/**
* find_or_add_av - internal function to find a volume, add a volume or do
* both (find and add if missing).
* @ai: attaching information
* @vol_id: the requested volume ID
* @flags: a combination of the %AV_FIND and %AV_ADD flags describing the
* expected operation. If only %AV_ADD is set, -EEXIST is returned
* if the volume already exists. If only %AV_FIND is set, NULL is
* returned if the volume does not exist. And if both flags are
* set, the helper first tries to find an existing volume, and if
* it does not exist it creates a new one.
 * @created: out value used to inform the caller whether it's a newly created
* volume or not.
*
* This function returns a pointer to a volume description or an ERR_PTR if
* the operation failed. It can also return NULL if only %AV_FIND is set and
* the volume does not exist.
*/
static struct ubi_ainf_volume *find_or_add_av(struct ubi_attach_info *ai,
int vol_id, unsigned int flags,
bool *created)
{
struct ubi_ainf_volume *av;
struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
/* Walk the volume RB-tree to look if this volume is already present */
while (*p) {
parent = *p;
av = rb_entry(parent, struct ubi_ainf_volume, rb);
if (vol_id == av->vol_id) {
*created = false;
if (!(flags & AV_FIND))
return ERR_PTR(-EEXIST);
return av;
}
if (vol_id > av->vol_id)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
if (!(flags & AV_ADD))
return NULL;
/* The volume is absent - add it */
av = kzalloc(sizeof(*av), GFP_KERNEL);
if (!av)
return ERR_PTR(-ENOMEM);
av->vol_id = vol_id;
if (vol_id > ai->highest_vol_id)
ai->highest_vol_id = vol_id;
rb_link_node(&av->rb, parent, p);
rb_insert_color(&av->rb, &ai->volumes);
ai->vols_found += 1;
*created = true;
dbg_bld("added volume %d", vol_id);
return av;
}
/**
* ubi_find_or_add_av - search for a volume in the attaching information and
* add one if it does not exist.
* @ai: attaching information
* @vol_id: the requested volume ID
* @created: whether the volume has been created or not
*
* This function returns a pointer to the new volume description or an
* ERR_PTR if the operation failed.
*/
static struct ubi_ainf_volume *ubi_find_or_add_av(struct ubi_attach_info *ai,
int vol_id, bool *created)
{
return find_or_add_av(ai, vol_id, AV_FIND_OR_ADD, created);
}
/**
* ubi_alloc_aeb - allocate an aeb element
* @ai: attaching information
* @pnum: physical eraseblock number
* @ec: erase counter of the physical eraseblock
*
* Allocate an aeb object and initialize the pnum and ec information.
* vol_id and lnum are set to UBI_UNKNOWN, and the other fields are
* initialized to zero.
* Note that the element is not added in any list or RB tree.
*/
struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum,
int ec)
{
struct ubi_ainf_peb *aeb;
aeb = kmem_cache_zalloc(ai->aeb_slab_cache, GFP_KERNEL);
if (!aeb)
return NULL;
aeb->pnum = pnum;
aeb->ec = ec;
aeb->vol_id = UBI_UNKNOWN;
aeb->lnum = UBI_UNKNOWN;
return aeb;
}
/**
* ubi_free_aeb - free an aeb element
* @ai: attaching information
* @aeb: the element to free
*
* Free an aeb object. The caller must have removed the element from any list
* or RB tree.
*/
void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb)
{
kmem_cache_free(ai->aeb_slab_cache, aeb);
}
/**
* add_to_list - add physical eraseblock to a list.
* @ai: attaching information
* @pnum: physical eraseblock number to add
* @vol_id: the last used volume id for the PEB
* @lnum: the last used LEB number for the PEB
* @ec: erase counter of the physical eraseblock
* @to_head: if not zero, add to the head of the list
* @list: the list to add to
*
* This function allocates a 'struct ubi_ainf_peb' object for physical
* eraseblock @pnum and adds it to the "free", "erase", or "alien" lists.
* It stores the @lnum and @vol_id alongside, which can both be
* %UBI_UNKNOWN if they are not available, not readable, or not assigned.
 * If @to_head is not zero, the PEB will be added to the head of the list, which
 * basically means it will be processed first. E.g., we add corrupted
* PEBs (corrupted due to power cuts) to the head of the erase list to make
* sure we erase them first and get rid of corruptions ASAP. This function
* returns zero in case of success and a negative error code in case of
* failure.
*/
static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
int lnum, int ec, int to_head, struct list_head *list)
{
struct ubi_ainf_peb *aeb;
if (list == &ai->free) {
dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
} else if (list == &ai->erase) {
dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
} else if (list == &ai->alien) {
dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
ai->alien_peb_count += 1;
} else
BUG();
aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
aeb->vol_id = vol_id;
aeb->lnum = lnum;
if (to_head)
list_add(&aeb->u.list, list);
else
list_add_tail(&aeb->u.list, list);
return 0;
}
/**
* add_corrupted - add a corrupted physical eraseblock.
* @ai: attaching information
* @pnum: physical eraseblock number to add
* @ec: erase counter of the physical eraseblock
*
* This function allocates a 'struct ubi_ainf_peb' object for a corrupted
* physical eraseblock @pnum and adds it to the 'corr' list. The corruption
* was presumably not caused by a power cut. Returns zero in case of success
* and a negative error code in case of failure.
*/
static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
{
struct ubi_ainf_peb *aeb;
dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
ai->corr_peb_count += 1;
list_add(&aeb->u.list, &ai->corr);
return 0;
}
/**
* add_fastmap - add a Fastmap related physical eraseblock.
* @ai: attaching information
* @pnum: physical eraseblock number the VID header came from
* @vid_hdr: the volume identifier header
* @ec: erase counter of the physical eraseblock
*
 * This function allocates a 'struct ubi_ainf_peb' object for a Fastmap
* physical eraseblock @pnum and adds it to the 'fastmap' list.
 * Such blocks can be Fastmap super and data blocks, either from the most
 * recent Fastmap we're attaching from or from old Fastmaps which will
* be erased.
*/
static int add_fastmap(struct ubi_attach_info *ai, int pnum,
struct ubi_vid_hdr *vid_hdr, int ec)
{
struct ubi_ainf_peb *aeb;
aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
aeb->vol_id = be32_to_cpu(vid_hdr->vol_id);
aeb->sqnum = be64_to_cpu(vid_hdr->sqnum);
list_add(&aeb->u.list, &ai->fastmap);
dbg_bld("add to fastmap list: PEB %d, vol_id %d, sqnum: %llu", pnum,
aeb->vol_id, aeb->sqnum);
return 0;
}
/**
* validate_vid_hdr - check volume identifier header.
* @ubi: UBI device description object
* @vid_hdr: the volume identifier header to check
* @av: information about the volume this logical eraseblock belongs to
* @pnum: physical eraseblock number the VID header came from
*
* This function checks that data stored in @vid_hdr is consistent. Returns
* non-zero if an inconsistency was found and zero if not.
*
* Note, UBI does sanity check of everything it reads from the flash media.
* Most of the checks are done in the I/O sub-system. Here we check that the
 * information in the VID header is consistent with the information in other VID
* headers of the same volume.
*/
static int validate_vid_hdr(const struct ubi_device *ubi,
const struct ubi_vid_hdr *vid_hdr,
const struct ubi_ainf_volume *av, int pnum)
{
int vol_type = vid_hdr->vol_type;
int vol_id = be32_to_cpu(vid_hdr->vol_id);
int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
int data_pad = be32_to_cpu(vid_hdr->data_pad);
if (av->leb_count != 0) {
int av_vol_type;
/*
* This is not the first logical eraseblock belonging to this
* volume. Ensure that the data in its VID header is consistent
		 * with the data in previous logical eraseblock headers.
*/
if (vol_id != av->vol_id) {
ubi_err(ubi, "inconsistent vol_id");
goto bad;
}
if (av->vol_type == UBI_STATIC_VOLUME)
av_vol_type = UBI_VID_STATIC;
else
av_vol_type = UBI_VID_DYNAMIC;
if (vol_type != av_vol_type) {
ubi_err(ubi, "inconsistent vol_type");
goto bad;
}
if (used_ebs != av->used_ebs) {
ubi_err(ubi, "inconsistent used_ebs");
goto bad;
}
if (data_pad != av->data_pad) {
ubi_err(ubi, "inconsistent data_pad");
goto bad;
}
}
return 0;
bad:
ubi_err(ubi, "inconsistent VID header at PEB %d", pnum);
ubi_dump_vid_hdr(vid_hdr);
ubi_dump_av(av);
return -EINVAL;
}
/**
* add_volume - add volume to the attaching information.
* @ai: attaching information
* @vol_id: ID of the volume to add
* @pnum: physical eraseblock number
* @vid_hdr: volume identifier header
*
* If the volume corresponding to the @vid_hdr logical eraseblock is already
* present in the attaching information, this function does nothing. Otherwise
* it adds corresponding volume to the attaching information. Returns a pointer
* to the allocated "av" object in case of success and a negative error code in
* case of failure.
*/
static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
int vol_id, int pnum,
const struct ubi_vid_hdr *vid_hdr)
{
struct ubi_ainf_volume *av;
bool created;
ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
av = ubi_find_or_add_av(ai, vol_id, &created);
if (IS_ERR(av) || !created)
return av;
av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
av->data_pad = be32_to_cpu(vid_hdr->data_pad);
av->compat = vid_hdr->compat;
av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
: UBI_STATIC_VOLUME;
return av;
}
/**
* ubi_compare_lebs - find out which logical eraseblock is newer.
* @ubi: UBI device description object
* @aeb: first logical eraseblock to compare
* @pnum: physical eraseblock number of the second logical eraseblock to
* compare
* @vid_hdr: volume identifier header of the second logical eraseblock
*
 * This function compares 2 copies of a LEB and reports which one is newer. In
* case of success this function returns a positive value, in case of failure, a
* negative error code is returned. The success return codes use the following
* bits:
* o bit 0 is cleared: the first PEB (described by @aeb) is newer than the
* second PEB (described by @pnum and @vid_hdr);
* o bit 0 is set: the second PEB is newer;
* o bit 1 is cleared: no bit-flips were detected in the newer LEB;
* o bit 1 is set: bit-flips were detected in the newer LEB;
* o bit 2 is cleared: the older LEB is not corrupted;
* o bit 2 is set: the older LEB is corrupted.
*/
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr)
{
int len, err, second_is_newer, bitflips = 0, corrupted = 0;
uint32_t data_crc, crc;
struct ubi_vid_io_buf *vidb = NULL;
unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
if (sqnum2 == aeb->sqnum) {
/*
* This must be a really ancient UBI image which has been
		 * created before sequence number support was added. At
		 * that time we used 32-bit LEB versions stored in logical
* eraseblocks. That was before UBI got into mainline. We do not
* support these images anymore. Well, those images still work,
* but only if no unclean reboots happened.
*/
ubi_err(ubi, "unsupported on-flash UBI format");
return -EINVAL;
}
	/* Obviously the LEB with the lower sequence number is older */
second_is_newer = (sqnum2 > aeb->sqnum);
/*
* Now we know which copy is newer. If the copy flag of the PEB with
* newer version is not set, then we just return, otherwise we have to
* check data CRC. For the second PEB we already have the VID header,
* for the first one - we'll need to re-read it from flash.
*
* Note: this may be optimized so that we wouldn't read twice.
*/
if (second_is_newer) {
if (!vid_hdr->copy_flag) {
/* It is not a copy, so it is newer */
dbg_bld("second PEB %d is newer, copy_flag is unset",
pnum);
return 1;
}
} else {
if (!aeb->copy_flag) {
/* It is not a copy, so it is newer */
dbg_bld("first PEB %d is newer, copy_flag is unset",
pnum);
return bitflips << 1;
}
vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!vidb)
return -ENOMEM;
pnum = aeb->pnum;
err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
if (err) {
if (err == UBI_IO_BITFLIPS)
bitflips = 1;
else {
ubi_err(ubi, "VID of PEB %d header is bad, but it was OK earlier, err %d",
pnum, err);
if (err > 0)
err = -EIO;
goto out_free_vidh;
}
}
vid_hdr = ubi_get_vid_hdr(vidb);
}
/* Read the data of the copy and check the CRC */
len = be32_to_cpu(vid_hdr->data_size);
mutex_lock(&ubi->buf_mutex);
err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
goto out_unlock;
data_crc = be32_to_cpu(vid_hdr->data_crc);
crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
if (crc != data_crc) {
dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
pnum, crc, data_crc);
corrupted = 1;
bitflips = 0;
second_is_newer = !second_is_newer;
} else {
dbg_bld("PEB %d CRC is OK", pnum);
bitflips |= !!err;
}
mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_buf(vidb);
if (second_is_newer)
dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
else
dbg_bld("first PEB %d is newer, copy_flag is set", pnum);
return second_is_newer | (bitflips << 1) | (corrupted << 2);
out_unlock:
mutex_unlock(&ubi->buf_mutex);
out_free_vidh:
ubi_free_vid_buf(vidb);
return err;
}
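/*
 * A sketch decoding the bit-encoded ubi_compare_lebs() result described in
 * the kernel-doc above. Hypothetical helper, compiled out.
 */
#if 0
static void example_decode_cmp(int cmp_res, bool *second_is_newer,
			       bool *bitflips, bool *corrupted)
{
	*second_is_newer = cmp_res & 1;	/* bit 0: second PEB is newer */
	*bitflips = cmp_res & 2;	/* bit 1: newer LEB has bit-flips */
	*corrupted = cmp_res & 4;	/* bit 2: older LEB is corrupted */
}
#endif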
/**
* ubi_add_to_av - add used physical eraseblock to the attaching information.
* @ubi: UBI device description object
* @ai: attaching information
* @pnum: the physical eraseblock number
* @ec: erase counter
* @vid_hdr: the volume identifier header
* @bitflips: if bit-flips were detected when this physical eraseblock was read
*
* This function adds information about a used physical eraseblock to the
* 'used' tree of the corresponding volume. The function is rather complex
* because it has to handle cases when this is not the first physical
* eraseblock belonging to the same logical eraseblock, and the newer one has
* to be picked, while the older one has to be dropped. This function returns
* zero in case of success and a negative error code in case of failure.
*/
int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
{
int err, vol_id, lnum;
unsigned long long sqnum;
struct ubi_ainf_volume *av;
struct ubi_ainf_peb *aeb;
struct rb_node **p, *parent = NULL;
vol_id = be32_to_cpu(vid_hdr->vol_id);
lnum = be32_to_cpu(vid_hdr->lnum);
sqnum = be64_to_cpu(vid_hdr->sqnum);
dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
pnum, vol_id, lnum, ec, sqnum, bitflips);
av = add_volume(ai, vol_id, pnum, vid_hdr);
if (IS_ERR(av))
return PTR_ERR(av);
if (ai->max_sqnum < sqnum)
ai->max_sqnum = sqnum;
/*
* Walk the RB-tree of logical eraseblocks of volume @vol_id to look
* if this is the first instance of this logical eraseblock or not.
*/
p = &av->root.rb_node;
while (*p) {
int cmp_res;
parent = *p;
aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
if (lnum != aeb->lnum) {
if (lnum < aeb->lnum)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
continue;
}
/*
* There is already a physical eraseblock describing the same
* logical eraseblock present.
*/
dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
aeb->pnum, aeb->sqnum, aeb->ec);
/*
* Make sure that the logical eraseblocks have different
* sequence numbers. Otherwise the image is bad.
*
* However, if the sequence number is zero, we assume it must
* be an ancient UBI image from the era when UBI did not have
* sequence numbers. We still can attach these images, unless
* there is a need to distinguish between old and new
* eraseblocks, in which case we'll refuse the image in
* 'ubi_compare_lebs()'. In other words, we attach old clean
* images, but refuse attaching old images with duplicated
* logical eraseblocks because there was an unclean reboot.
*/
if (aeb->sqnum == sqnum && sqnum != 0) {
ubi_err(ubi, "two LEBs with same sequence number %llu",
sqnum);
ubi_dump_aeb(aeb, 0);
ubi_dump_vid_hdr(vid_hdr);
return -EINVAL;
}
/*
* Now we have to drop the older one and preserve the newer
* one.
*/
cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
if (cmp_res < 0)
return cmp_res;
if (cmp_res & 1) {
/*
* This logical eraseblock is newer than the one
* found earlier.
*/
err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
if (err)
return err;
err = add_to_list(ai, aeb->pnum, aeb->vol_id,
aeb->lnum, aeb->ec, cmp_res & 4,
&ai->erase);
if (err)
return err;
aeb->ec = ec;
aeb->pnum = pnum;
aeb->vol_id = vol_id;
aeb->lnum = lnum;
aeb->scrub = ((cmp_res & 2) || bitflips);
aeb->copy_flag = vid_hdr->copy_flag;
aeb->sqnum = sqnum;
if (av->highest_lnum == lnum)
av->last_data_size =
be32_to_cpu(vid_hdr->data_size);
return 0;
} else {
/*
* This logical eraseblock is older than the one found
* previously.
*/
return add_to_list(ai, pnum, vol_id, lnum, ec,
cmp_res & 4, &ai->erase);
}
}
/*
* We've met this logical eraseblock for the first time, add it to the
* attaching information.
*/
err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
if (err)
return err;
aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
aeb->vol_id = vol_id;
aeb->lnum = lnum;
aeb->scrub = bitflips;
aeb->copy_flag = vid_hdr->copy_flag;
aeb->sqnum = sqnum;
if (av->highest_lnum <= lnum) {
av->highest_lnum = lnum;
av->last_data_size = be32_to_cpu(vid_hdr->data_size);
}
av->leb_count += 1;
rb_link_node(&aeb->u.rb, parent, p);
rb_insert_color(&aeb->u.rb, &av->root);
return 0;
}
/**
* ubi_add_av - add volume to the attaching information.
* @ai: attaching information
* @vol_id: the requested volume ID
*
* This function returns a pointer to the new volume description or an
* ERR_PTR if the operation failed.
*/
struct ubi_ainf_volume *ubi_add_av(struct ubi_attach_info *ai, int vol_id)
{
bool created;
return find_or_add_av(ai, vol_id, AV_ADD, &created);
}
/**
* ubi_find_av - find volume in the attaching information.
* @ai: attaching information
* @vol_id: the requested volume ID
*
* This function returns a pointer to the volume description or %NULL if there
* are no data about this volume in the attaching information.
*/
struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
int vol_id)
{
bool created;
return find_or_add_av((struct ubi_attach_info *)ai, vol_id, AV_FIND,
&created);
}
static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av,
struct list_head *list);
/**
* ubi_remove_av - delete attaching information about a volume.
* @ai: attaching information
* @av: the volume attaching information to delete
*/
void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
dbg_bld("remove attaching information about volume %d", av->vol_id);
rb_erase(&av->rb, &ai->volumes);
destroy_av(ai, av, &ai->erase);
ai->vols_found -= 1;
}
/**
* early_erase_peb - erase a physical eraseblock.
* @ubi: UBI device description object
* @ai: attaching information
* @pnum: physical eraseblock number to erase;
* @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown)
*
* This function erases physical eraseblock 'pnum', and writes the erase
* counter header to it. This function should only be used on UBI device
 * initialization stages, when the EBA sub-system has not yet been initialized.
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
static int early_erase_peb(struct ubi_device *ubi,
const struct ubi_attach_info *ai, int pnum, int ec)
{
int err;
struct ubi_ec_hdr *ec_hdr;
if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
/*
* Erase counter overflow. Upgrade UBI and use 64-bit
* erase counters internally.
*/
ubi_err(ubi, "erase counter overflow at PEB %d, EC %d",
pnum, ec);
return -EINVAL;
}
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ec_hdr)
return -ENOMEM;
ec_hdr->ec = cpu_to_be64(ec);
err = ubi_io_sync_erase(ubi, pnum, 0);
if (err < 0)
goto out_free;
err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
out_free:
kfree(ec_hdr);
return err;
}
/**
* ubi_early_get_peb - get a free physical eraseblock.
* @ubi: UBI device description object
* @ai: attaching information
*
* This function returns a free physical eraseblock. It is supposed to be
* called on the UBI initialization stages when the wear-leveling sub-system is
 * not initialized yet. This function picks a physical eraseblock from one of
* the lists, writes the EC header if it is needed, and removes it from the
* list.
*
* This function returns a pointer to the "aeb" of the found free PEB in case
* of success and an error code in case of failure.
*/
struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
struct ubi_attach_info *ai)
{
int err = 0;
struct ubi_ainf_peb *aeb, *tmp_aeb;
if (!list_empty(&ai->free)) {
aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
list_del(&aeb->u.list);
dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
return aeb;
}
/*
* We try to erase the first physical eraseblock from the erase list
* and pick it if we succeed, or try to erase the next one if not. And
 * so forth. We don't want to take care of bad eraseblocks here -
* they'll be handled later.
*/
list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
if (aeb->ec == UBI_UNKNOWN)
aeb->ec = ai->mean_ec;
err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
if (err)
continue;
aeb->ec += 1;
list_del(&aeb->u.list);
dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
return aeb;
}
ubi_err(ubi, "no free eraseblocks");
return ERR_PTR(-ENOSPC);
}
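/*
 * An illustrative early-init caller of ubi_early_get_peb(); hypothetical
 * and compiled out. The returned "aeb" is no longer on any list, so the
 * caller owns it.
 */
#if 0
static int example_early_peb(struct ubi_device *ubi,
			     struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb = ubi_early_get_peb(ubi, ai);

	if (IS_ERR(aeb))
		return PTR_ERR(aeb);
	/* ... write to aeb->pnum, then hand @aeb back to @ai ... */
	return 0;
}
#endif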
/**
* check_corruption - check the data area of PEB.
* @ubi: UBI device description object
* @vid_hdr: the (corrupted) VID header of this PEB
* @pnum: the physical eraseblock number to check
*
* This is a helper function which is used to distinguish between VID header
* corruptions caused by power cuts and other reasons. If the PEB contains only
* 0xFF bytes in the data area, the VID header is most probably corrupted
* because of a power cut (%0 is returned in this case). Otherwise, it was
* probably corrupted for some other reasons (%1 is returned in this case). A
* negative error code is returned if a read error occurred.
*
* If the corruption reason was a power cut, UBI can safely erase this PEB.
* Otherwise, it should preserve it to avoid possibly destroying important
* information.
*/
static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
int pnum)
{
int err;
mutex_lock(&ubi->buf_mutex);
memset(ubi->peb_buf, 0x00, ubi->leb_size);
err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
ubi->leb_size);
if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
/*
* Bit-flips or integrity errors while reading the data area.
* It is difficult to say for sure what type of corruption is
* this, but presumably a power cut happened while this PEB was
* erased, so it became unstable and corrupted, and should be
* erased.
*/
err = 0;
goto out_unlock;
}
if (err)
goto out_unlock;
if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
goto out_unlock;
ubi_err(ubi, "PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
pnum);
ubi_err(ubi, "this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
ubi_dump_vid_hdr(vid_hdr);
pr_err("hexdump of PEB %d offset %d, length %d",
pnum, ubi->leb_start, ubi->leb_size);
ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
ubi->peb_buf, ubi->leb_size, 1);
err = 1;
out_unlock:
mutex_unlock(&ubi->buf_mutex);
return err;
}
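/*
 * A sketch of how check_corruption() feeds the two corruption classes from
 * the file header comment; it mirrors the logic in scan_peb() below and is
 * compiled out.
 */
#if 0
static int example_classify(struct ubi_device *ubi,
			    struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *vidh, int pnum, int ec)
{
	int err = check_corruption(ubi, vidh, pnum);

	if (err < 0)
		return err;
	if (!err)
		/* Power-cut corruption: erase ASAP (head of @erase list) */
		return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN, ec, 1,
				   &ai->erase);
	/* Unexpected corruption: preserve the PEB on the @corr list */
	return add_corrupted(ai, pnum, ec);
}
#endif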
static bool vol_ignored(int vol_id)
{
switch (vol_id) {
case UBI_LAYOUT_VOLUME_ID:
return true;
}
#ifdef CONFIG_MTD_UBI_FASTMAP
return ubi_is_fm_vol(vol_id);
#else
return false;
#endif
}
/**
* scan_peb - scan and process UBI headers of a PEB.
* @ubi: UBI device description object
* @ai: attaching information
* @pnum: the physical eraseblock number
* @fast: true if we're scanning for a Fastmap
*
* This function reads UBI headers of PEB @pnum, checks them, and adds
* information about this PEB to the corresponding list or RB-tree in the
* "attaching info" structure. Returns zero if the physical eraseblock was
* successfully handled and a negative error code in case of failure.
*/
static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
int pnum, bool fast)
{
struct ubi_ec_hdr *ech = ai->ech;
struct ubi_vid_io_buf *vidb = ai->vidb;
struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb);
long long ec;
int err, bitflips = 0, vol_id = -1, ec_err = 0;
dbg_bld("scan PEB %d", pnum);
/* Skip bad physical eraseblocks */
err = ubi_io_is_bad(ubi, pnum);
if (err < 0)
return err;
else if (err) {
ai->bad_peb_count += 1;
return 0;
}
err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
if (err < 0)
return err;
switch (err) {
case 0:
break;
case UBI_IO_BITFLIPS:
bitflips = 1;
break;
case UBI_IO_FF:
ai->empty_peb_count += 1;
return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
UBI_UNKNOWN, 0, &ai->erase);
case UBI_IO_FF_BITFLIPS:
ai->empty_peb_count += 1;
return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
UBI_UNKNOWN, 1, &ai->erase);
case UBI_IO_BAD_HDR_EBADMSG:
case UBI_IO_BAD_HDR:
/*
		 * We also have to look at the VID header, possibly it is not
		 * corrupted. Set the %bitflips flag so that this PEB will be
		 * moved and its EC header re-created.
*/
ec_err = err;
ec = UBI_UNKNOWN;
bitflips = 1;
break;
default:
ubi_err(ubi, "'ubi_io_read_ec_hdr()' returned unknown code %d",
err);
return -EINVAL;
}
if (!ec_err) {
int image_seq;
/* Make sure UBI version is OK */
if (ech->version != UBI_VERSION) {
ubi_err(ubi, "this UBI version is %d, image version is %d",
UBI_VERSION, (int)ech->version);
return -EINVAL;
}
ec = be64_to_cpu(ech->ec);
if (ec > UBI_MAX_ERASECOUNTER) {
/*
* Erase counter overflow. The EC headers have 64 bits
			 * reserved, but we only make use of 31-bit
* values, as this seems to be enough for any existing
* flash. Upgrade UBI and use 64-bit erase counters
* internally.
*/
ubi_err(ubi, "erase counter overflow, max is %d",
UBI_MAX_ERASECOUNTER);
ubi_dump_ec_hdr(ech);
return -EINVAL;
}
/*
* Make sure that all PEBs have the same image sequence number.
* This allows us to detect situations when users flash UBI
* images incorrectly, so that the flash has the new UBI image
* and leftovers from the old one. This feature was added
* relatively recently, and the sequence number was always
* zero, because old UBI implementations always set it to zero.
		 * For this reason, we do not panic if some PEBs have zero
* sequence number, while other PEBs have non-zero sequence
* number.
*/
image_seq = be32_to_cpu(ech->image_seq);
if (!ubi->image_seq)
ubi->image_seq = image_seq;
if (image_seq && ubi->image_seq != image_seq) {
ubi_err(ubi, "bad image sequence number %d in PEB %d, expected %d",
image_seq, pnum, ubi->image_seq);
ubi_dump_ec_hdr(ech);
return -EINVAL;
}
}
/* OK, we've done with the EC header, let's look at the VID header */
err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
if (err < 0)
return err;
switch (err) {
case 0:
break;
case UBI_IO_BITFLIPS:
bitflips = 1;
break;
case UBI_IO_BAD_HDR_EBADMSG:
if (ec_err == UBI_IO_BAD_HDR_EBADMSG)
/*
* Both EC and VID headers are corrupted and were read
* with data integrity error, probably this is a bad
			 * PEB, but it is not marked as bad yet. This may also
			 * be a result of a power cut during erasure.
*/
ai->maybe_bad_peb_count += 1;
fallthrough;
case UBI_IO_BAD_HDR:
/*
* If we're facing a bad VID header we have to drop *all*
* Fastmap data structures we find. The most recent Fastmap
* could be bad and therefore there is a chance that we attach
		 * from an old one. On a sane MTD stack a PEB should not go
		 * bad all of a sudden, but the reality is different.
		 * So, let's be paranoid and help find the root cause by
		 * falling back to scanning mode instead of attaching with a
		 * bad EBA table and causing data corruption which is hard to
* analyze.
*/
if (fast)
ai->force_full_scan = 1;
if (ec_err)
/*
* Both headers are corrupted. There is a possibility
			 * that this is a valid UBI PEB which has a corresponding
* LEB, but the headers are corrupted. However, it is
* impossible to distinguish it from a PEB which just
* contains garbage because of a power cut during erase
* operation. So we just schedule this PEB for erasure.
*
* Besides, in case of NOR flash, we deliberately
* corrupt both headers because NOR flash erasure is
* slow and can start from the end.
*/
err = 0;
else
/*
* The EC was OK, but the VID header is corrupted. We
* have to check what is in the data area.
*/
err = check_corruption(ubi, vidh, pnum);
if (err < 0)
return err;
else if (!err)
/* This corruption is caused by a power cut */
err = add_to_list(ai, pnum, UBI_UNKNOWN,
UBI_UNKNOWN, ec, 1, &ai->erase);
else
/* This is an unexpected corruption */
err = add_corrupted(ai, pnum, ec);
if (err)
return err;
goto adjust_mean_ec;
case UBI_IO_FF_BITFLIPS:
err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
ec, 1, &ai->erase);
if (err)
return err;
goto adjust_mean_ec;
case UBI_IO_FF:
if (ec_err || bitflips)
err = add_to_list(ai, pnum, UBI_UNKNOWN,
UBI_UNKNOWN, ec, 1, &ai->erase);
else
err = add_to_list(ai, pnum, UBI_UNKNOWN,
UBI_UNKNOWN, ec, 0, &ai->free);
if (err)
return err;
goto adjust_mean_ec;
default:
ubi_err(ubi, "'ubi_io_read_vid_hdr()' returned unknown code %d",
err);
return -EINVAL;
}
vol_id = be32_to_cpu(vidh->vol_id);
if (vol_id > UBI_MAX_VOLUMES && !vol_ignored(vol_id)) {
int lnum = be32_to_cpu(vidh->lnum);
/* Unsupported internal volume */
switch (vidh->compat) {
case UBI_COMPAT_DELETE:
ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
vol_id, lnum);
err = add_to_list(ai, pnum, vol_id, lnum,
ec, 1, &ai->erase);
if (err)
return err;
return 0;
case UBI_COMPAT_RO:
ubi_msg(ubi, "read-only compatible internal volume %d:%d found, switch to read-only mode",
vol_id, lnum);
ubi->ro_mode = 1;
break;
case UBI_COMPAT_PRESERVE:
ubi_msg(ubi, "\"preserve\" compatible internal volume %d:%d found",
vol_id, lnum);
err = add_to_list(ai, pnum, vol_id, lnum,
ec, 0, &ai->alien);
if (err)
return err;
return 0;
case UBI_COMPAT_REJECT:
ubi_err(ubi, "incompatible internal volume %d:%d found",
vol_id, lnum);
return -EINVAL;
}
}
if (ec_err)
ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
pnum);
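/*
 * Fastmap volumes are tracked on a separate list; all other PEBs
 * are inserted into the per-volume attach tree.
 */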
if (ubi_is_fm_vol(vol_id))
err = add_fastmap(ai, pnum, vidh, ec);
else
err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
if (err)
return err;
adjust_mean_ec:
if (!ec_err) {
ai->ec_sum += ec;
ai->ec_count += 1;
if (ec > ai->max_ec)
ai->max_ec = ec;
if (ec < ai->min_ec)
ai->min_ec = ec;
}
return 0;
}
/**
 * late_analysis - analyze the overall situation with PEBs.
* @ubi: UBI device description object
* @ai: attaching information
*
 * This is a helper function which takes a look at what PEBs we have after we
 * gather information about all of them ("ai" is complete). It decides whether
 * the flash is empty and should be formatted, or whether there are too many
 * corrupted PEBs and we should not attach this MTD device. Returns zero if we
* should proceed with attaching the MTD device, and %-EINVAL if we should not.
*/
static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
struct ubi_ainf_peb *aeb;
int max_corr, peb_count;
peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
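/*
 * Tolerate up to ~5% corrupted PEBs (the GCC "?:" extension keeps
 * peb_count / 20 when it is non-zero), with a floor of 8 for small
 * devices.
 */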
max_corr = peb_count / 20 ?: 8;
/*
 * A few corrupted PEBs are not a problem and may just be a result of
 * unclean reboots. However, many of them may indicate problems with
 * the flash HW or the driver.
*/
if (ai->corr_peb_count) {
ubi_err(ubi, "%d PEBs are corrupted and preserved",
ai->corr_peb_count);
pr_err("Corrupted PEBs are:");
list_for_each_entry(aeb, &ai->corr, u.list)
pr_cont(" %d", aeb->pnum);
pr_cont("\n");
/*
* If too many PEBs are corrupted, we refuse attaching,
* otherwise, only print a warning.
*/
if (ai->corr_peb_count >= max_corr) {
ubi_err(ubi, "too many corrupted PEBs, refusing");
return -EINVAL;
}
}
if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
/*
 * All PEBs are empty, or almost all - a couple of PEBs look like
 * they may be bad PEBs which were not marked as bad yet.
*
* This piece of code basically tries to distinguish between
* the following situations:
*
 * 1. Flash is empty, but there are a few bad PEBs, which are not
 * marked as bad so far, and which were read with errors. We
* want to go ahead and format this flash. While formatting,
* the faulty PEBs will probably be marked as bad.
*
* 2. Flash contains non-UBI data and we do not want to format
* it and destroy possibly important information.
*/
if (ai->maybe_bad_peb_count <= 2) {
ai->is_empty = 1;
ubi_msg(ubi, "empty MTD device detected");
get_random_bytes(&ubi->image_seq,
sizeof(ubi->image_seq));
} else {
ubi_err(ubi, "MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
return -EINVAL;
}
}
return 0;
}
/**
* destroy_av - free volume attaching information.
* @av: volume attaching information
* @ai: attaching information
 * @list: put the aeb elements there if not NULL, otherwise free them
*
* This function destroys the volume attaching information.
*/
static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av,
struct list_head *list)
{
struct ubi_ainf_peb *aeb;
struct rb_node *this = av->root.rb_node;
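/*
 * Tear the RB-tree down iteratively: always descend to a leaf,
 * free it, and detach it from its parent by hand. No rebalancing
 * is needed since the whole tree is being destroyed.
 */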
while (this) {
if (this->rb_left)
this = this->rb_left;
else if (this->rb_right)
this = this->rb_right;
else {
aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
this = rb_parent(this);
if (this) {
if (this->rb_left == &aeb->u.rb)
this->rb_left = NULL;
else
this->rb_right = NULL;
}
if (list)
list_add_tail(&aeb->u.list, list);
else
ubi_free_aeb(ai, aeb);
}
}
kfree(av);
}
/**
* destroy_ai - destroy attaching information.
* @ai: attaching information
*/
static void destroy_ai(struct ubi_attach_info *ai)
{
struct ubi_ainf_peb *aeb, *aeb_tmp;
struct ubi_ainf_volume *av;
struct rb_node *rb;
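/* Drain all PEB lists; every element came from the aeb slab cache. */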
list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
list_del(&aeb->u.list);
ubi_free_aeb(ai, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
list_del(&aeb->u.list);
ubi_free_aeb(ai, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
list_del(&aeb->u.list);
ubi_free_aeb(ai, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
list_del(&aeb->u.list);
ubi_free_aeb(ai, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->fastmap, u.list) {
list_del(&aeb->u.list);
ubi_free_aeb(ai, aeb);
}
/* Destroy the volume RB-tree */
rb = ai->volumes.rb_node;
while (rb) {
if (rb->rb_left)
rb = rb->rb_left;
else if (rb->rb_right)
rb = rb->rb_right;
else {
av = rb_entry(rb, struct ubi_ainf_volume, rb);
rb = rb_parent(rb);
if (rb) {
if (rb->rb_left == &av->rb)
rb->rb_left = NULL;
else
rb->rb_right = NULL;
}
destroy_av(ai, av, NULL);
}
}
kmem_cache_destroy(ai->aeb_slab_cache);
kfree(ai);
}
/**
* scan_all - scan entire MTD device.
* @ubi: UBI device description object
* @ai: attach info object
* @start: start scanning at this PEB
*
* This function does full scanning of an MTD device and returns complete
 * information about it in the form of a "struct ubi_attach_info" object. In case
* of failure, an error code is returned.
*/
static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
int start)
{
int err, pnum;
struct rb_node *rb1, *rb2;
struct ubi_ainf_volume *av;
struct ubi_ainf_peb *aeb;
err = -ENOMEM;
ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ai->ech)
return err;
ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!ai->vidb)
goto out_ech;
for (pnum = start; pnum < ubi->peb_count; pnum++) {
cond_resched();
dbg_gen("process PEB %d", pnum);
err = scan_peb(ubi, ai, pnum, false);
if (err < 0)
goto out_vidh;
}
ubi_msg(ubi, "scanning is finished");
/* Calculate mean erase counter */
if (ai->ec_count)
ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
err = late_analysis(ubi, ai);
if (err)
goto out_vidh;
/*
 * In case of an unknown erase counter we use the mean erase counter
* value.
*/
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
if (aeb->ec == UBI_UNKNOWN)
aeb->ec = ai->mean_ec;
}
list_for_each_entry(aeb, &ai->free, u.list) {
if (aeb->ec == UBI_UNKNOWN)
aeb->ec = ai->mean_ec;
}
list_for_each_entry(aeb, &ai->corr, u.list)
if (aeb->ec == UBI_UNKNOWN)
aeb->ec = ai->mean_ec;
list_for_each_entry(aeb, &ai->erase, u.list)
if (aeb->ec == UBI_UNKNOWN)
aeb->ec = ai->mean_ec;
err = self_check_ai(ubi, ai);
if (err)
goto out_vidh;
ubi_free_vid_buf(ai->vidb);
kfree(ai->ech);
return 0;
out_vidh:
ubi_free_vid_buf(ai->vidb);
out_ech:
kfree(ai->ech);
return err;
}
static struct ubi_attach_info *alloc_ai(void)
{
struct ubi_attach_info *ai;
ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
if (!ai)
return ai;
INIT_LIST_HEAD(&ai->corr);
INIT_LIST_HEAD(&ai->free);
INIT_LIST_HEAD(&ai->erase);
INIT_LIST_HEAD(&ai->alien);
INIT_LIST_HEAD(&ai->fastmap);
ai->volumes = RB_ROOT;
ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
sizeof(struct ubi_ainf_peb),
0, 0, NULL);
if (!ai->aeb_slab_cache) {
kfree(ai);
ai = NULL;
}
return ai;
}
#ifdef CONFIG_MTD_UBI_FASTMAP
/**
* scan_fast - try to find a fastmap and attach from it.
* @ubi: UBI device description object
* @ai: attach info object
*
* Returns 0 on success, negative return values indicate an internal
* error.
* UBI_NO_FASTMAP denotes that no fastmap was found.
* UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
*/
static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
{
int err, pnum;
struct ubi_attach_info *scan_ai;
err = -ENOMEM;
scan_ai = alloc_ai();
if (!scan_ai)
goto out;
scan_ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!scan_ai->ech)
goto out_ai;
scan_ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!scan_ai->vidb)
goto out_ech;
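/*
 * The fastmap anchor must reside within the first UBI_FM_MAX_START
 * PEBs, so scanning only this window is sufficient to locate it.
 */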
for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
cond_resched();
dbg_gen("process PEB %d", pnum);
err = scan_peb(ubi, scan_ai, pnum, true);
if (err < 0)
goto out_vidh;
}
ubi_free_vid_buf(scan_ai->vidb);
kfree(scan_ai->ech);
if (scan_ai->force_full_scan)
err = UBI_NO_FASTMAP;
else
err = ubi_scan_fastmap(ubi, *ai, scan_ai);
if (err) {
/*
* Didn't attach via fastmap, do a full scan but reuse what
 * we've already scanned.
*/
destroy_ai(*ai);
*ai = scan_ai;
} else
destroy_ai(scan_ai);
return err;
out_vidh:
ubi_free_vid_buf(scan_ai->vidb);
out_ech:
kfree(scan_ai->ech);
out_ai:
destroy_ai(scan_ai);
out:
return err;
}
#endif
/**
* ubi_attach - attach an MTD device.
* @ubi: UBI device descriptor
* @force_scan: if set to non-zero attach by scanning
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
int ubi_attach(struct ubi_device *ubi, int force_scan)
{
int err;
struct ubi_attach_info *ai;
ai = alloc_ai();
if (!ai)
return -ENOMEM;
#ifdef CONFIG_MTD_UBI_FASTMAP
/* On small flash devices we disable fastmap in any case. */
if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
ubi->fm_disabled = 1;
force_scan = 1;
}
if (force_scan)
err = scan_all(ubi, ai, 0);
else {
err = scan_fast(ubi, &ai);
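/*
 * Positive return codes (UBI_NO_FASTMAP, UBI_BAD_FASTMAP) and ECC
 * errors mean the fastmap could not be used - fall back to a full
 * scan.
 */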
if (err > 0 || mtd_is_eccerr(err)) {
if (err != UBI_NO_FASTMAP) {
destroy_ai(ai);
ai = alloc_ai();
if (!ai)
return -ENOMEM;
err = scan_all(ubi, ai, 0);
} else {
err = scan_all(ubi, ai, UBI_FM_MAX_START);
}
}
}
#else
err = scan_all(ubi, ai, 0);
#endif
if (err)
goto out_ai;
ubi->bad_peb_count = ai->bad_peb_count;
ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
ubi->corr_peb_count = ai->corr_peb_count;
ubi->max_ec = ai->max_ec;
ubi->mean_ec = ai->mean_ec;
dbg_gen("max. sequence number: %llu", ai->max_sqnum);
err = ubi_read_volume_table(ubi, ai);
if (err)
goto out_ai;
err = ubi_wl_init(ubi, ai);
if (err)
goto out_vtbl;
err = ubi_eba_init(ubi, ai);
if (err)
goto out_wl;
#ifdef CONFIG_MTD_UBI_FASTMAP
if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
struct ubi_attach_info *scan_ai;
scan_ai = alloc_ai();
if (!scan_ai) {
err = -ENOMEM;
goto out_wl;
}
err = scan_all(ubi, scan_ai, 0);
if (err) {
destroy_ai(scan_ai);
goto out_wl;
}
err = self_check_eba(ubi, ai, scan_ai);
destroy_ai(scan_ai);
if (err)
goto out_wl;
}
#endif
destroy_ai(ai);
return 0;
out_wl:
ubi_wl_close(ubi);
out_vtbl:
ubi_free_all_volumes(ubi);
vfree(ubi->vtbl);
out_ai:
destroy_ai(ai);
return err;
}
/**
* self_check_ai - check the attaching information.
* @ubi: UBI device description object
* @ai: attaching information
*
* This function returns zero if the attaching information is all right, and a
* negative error code if not or if an error occurred.
*/
static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
struct ubi_vid_io_buf *vidb = ai->vidb;
struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb);
int pnum, err, vols_found = 0;
struct rb_node *rb1, *rb2;
struct ubi_ainf_volume *av;
struct ubi_ainf_peb *aeb, *last_aeb;
uint8_t *buf;
if (!ubi_dbg_chk_gen(ubi))
return 0;
/*
* At first, check that attaching information is OK.
*/
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
int leb_count = 0;
cond_resched();
vols_found += 1;
if (ai->is_empty) {
ubi_err(ubi, "bad is_empty flag");
goto bad_av;
}
if (av->vol_id < 0 || av->highest_lnum < 0 ||
av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
av->data_pad < 0 || av->last_data_size < 0) {
ubi_err(ubi, "negative values");
goto bad_av;
}
if (av->vol_id >= UBI_MAX_VOLUMES &&
av->vol_id < UBI_INTERNAL_VOL_START) {
ubi_err(ubi, "bad vol_id");
goto bad_av;
}
if (av->vol_id > ai->highest_vol_id) {
ubi_err(ubi, "highest_vol_id is %d, but vol_id %d is there",
ai->highest_vol_id, av->vol_id);
goto out;
}
if (av->vol_type != UBI_DYNAMIC_VOLUME &&
av->vol_type != UBI_STATIC_VOLUME) {
ubi_err(ubi, "bad vol_type");
goto bad_av;
}
if (av->data_pad > ubi->leb_size / 2) {
ubi_err(ubi, "bad data_pad");
goto bad_av;
}
last_aeb = NULL;
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
cond_resched();
last_aeb = aeb;
leb_count += 1;
if (aeb->pnum < 0 || aeb->ec < 0) {
ubi_err(ubi, "negative values");
goto bad_aeb;
}
if (aeb->ec < ai->min_ec) {
ubi_err(ubi, "bad ai->min_ec (%d), %d found",
ai->min_ec, aeb->ec);
goto bad_aeb;
}
if (aeb->ec > ai->max_ec) {
ubi_err(ubi, "bad ai->max_ec (%d), %d found",
ai->max_ec, aeb->ec);
goto bad_aeb;
}
if (aeb->pnum >= ubi->peb_count) {
ubi_err(ubi, "too high PEB number %d, total PEBs %d",
aeb->pnum, ubi->peb_count);
goto bad_aeb;
}
if (av->vol_type == UBI_STATIC_VOLUME) {
if (aeb->lnum >= av->used_ebs) {
ubi_err(ubi, "bad lnum or used_ebs");
goto bad_aeb;
}
} else {
if (av->used_ebs != 0) {
ubi_err(ubi, "non-zero used_ebs");
goto bad_aeb;
}
}
if (aeb->lnum > av->highest_lnum) {
ubi_err(ubi, "incorrect highest_lnum or lnum");
goto bad_aeb;
}
}
if (av->leb_count != leb_count) {
ubi_err(ubi, "bad leb_count, %d objects in the tree",
leb_count);
goto bad_av;
}
if (!last_aeb)
continue;
aeb = last_aeb;
if (aeb->lnum != av->highest_lnum) {
ubi_err(ubi, "bad highest_lnum");
goto bad_aeb;
}
}
if (vols_found != ai->vols_found) {
ubi_err(ubi, "bad ai->vols_found %d, should be %d",
ai->vols_found, vols_found);
goto out;
}
/* Check that attaching information is correct */
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
last_aeb = NULL;
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
int vol_type;
cond_resched();
last_aeb = aeb;
err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidb, 1);
if (err && err != UBI_IO_BITFLIPS) {
ubi_err(ubi, "VID header is not OK (%d)",
err);
if (err > 0)
err = -EIO;
return err;
}
vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
if (av->vol_type != vol_type) {
ubi_err(ubi, "bad vol_type");
goto bad_vid_hdr;
}
if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
ubi_err(ubi, "bad sqnum %llu", aeb->sqnum);
goto bad_vid_hdr;
}
if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
ubi_err(ubi, "bad vol_id %d", av->vol_id);
goto bad_vid_hdr;
}
if (av->compat != vidh->compat) {
ubi_err(ubi, "bad compat %d", vidh->compat);
goto bad_vid_hdr;
}
if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
ubi_err(ubi, "bad lnum %d", aeb->lnum);
goto bad_vid_hdr;
}
if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
ubi_err(ubi, "bad used_ebs %d", av->used_ebs);
goto bad_vid_hdr;
}
if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
ubi_err(ubi, "bad data_pad %d", av->data_pad);
goto bad_vid_hdr;
}
}
if (!last_aeb)
continue;
if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
ubi_err(ubi, "bad highest_lnum %d", av->highest_lnum);
goto bad_vid_hdr;
}
if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
ubi_err(ubi, "bad last_data_size %d",
av->last_data_size);
goto bad_vid_hdr;
}
}
/*
* Make sure that all the physical eraseblocks are in one of the lists
* or trees.
*/
buf = kzalloc(ubi->peb_count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
for (pnum = 0; pnum < ubi->peb_count; pnum++) {
err = ubi_io_is_bad(ubi, pnum);
if (err < 0) {
kfree(buf);
return err;
} else if (err)
buf[pnum] = 1;
}
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
buf[aeb->pnum] = 1;
list_for_each_entry(aeb, &ai->free, u.list)
buf[aeb->pnum] = 1;
list_for_each_entry(aeb, &ai->corr, u.list)
buf[aeb->pnum] = 1;
list_for_each_entry(aeb, &ai->erase, u.list)
buf[aeb->pnum] = 1;
list_for_each_entry(aeb, &ai->alien, u.list)
buf[aeb->pnum] = 1;
err = 0;
for (pnum = 0; pnum < ubi->peb_count; pnum++)
if (!buf[pnum]) {
ubi_err(ubi, "PEB %d is not referred", pnum);
err = 1;
}
kfree(buf);
if (err)
goto out;
return 0;
bad_aeb:
ubi_err(ubi, "bad attaching information about LEB %d", aeb->lnum);
ubi_dump_aeb(aeb, 0);
ubi_dump_av(av);
goto out;
bad_av:
ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
ubi_dump_av(av);
goto out;
bad_vid_hdr:
ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
ubi_dump_av(av);
ubi_dump_vid_hdr(vidh);
out:
dump_stack();
return -EINVAL;
}
| linux-master | drivers/mtd/ubi/attach.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012 Linutronix GmbH
* Copyright (c) 2014 sigma star gmbh
* Author: Richard Weinberger <[email protected]>
*/
#include <linux/crc32.h>
#include <linux/bitmap.h>
#include "ubi.h"
/**
 * init_seen - allocate the 'seen' bitmap used for debugging.
* @ubi: UBI device description object
*/
static inline unsigned long *init_seen(struct ubi_device *ubi)
{
unsigned long *ret;
if (!ubi_dbg_chk_fastmap(ubi))
return NULL;
ret = bitmap_zalloc(ubi->peb_count, GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
return ret;
}
/**
 * free_seen - free the 'seen' bitmap.
 * @seen: bitmap of @ubi->peb_count bits
*/
static inline void free_seen(unsigned long *seen)
{
bitmap_free(seen);
}
/**
* set_seen - mark a PEB as seen.
* @ubi: UBI device description object
 * @pnum: The PEB to be marked as seen
 * @seen: bitmap of @ubi->peb_count bits
*/
static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
{
if (!ubi_dbg_chk_fastmap(ubi) || !seen)
return;
set_bit(pnum, seen);
}
/**
 * self_check_seen - check whether all PEBs have been seen by fastmap.
 * @ubi: UBI device description object
 * @seen: bitmap of @ubi->peb_count bits
*/
static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
{
int pnum, ret = 0;
if (!ubi_dbg_chk_fastmap(ubi) || !seen)
return 0;
for (pnum = 0; pnum < ubi->peb_count; pnum++) {
if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
ret = -EINVAL;
}
}
return ret;
}
/**
* ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
* @ubi: UBI device description object
*/
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
size_t size;
size = sizeof(struct ubi_fm_sb) +
sizeof(struct ubi_fm_hdr) +
sizeof(struct ubi_fm_scan_pool) +
sizeof(struct ubi_fm_scan_pool) +
(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
(sizeof(struct ubi_fm_eba) +
(ubi->peb_count * sizeof(__be32))) +
sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
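/* The fastmap occupies whole LEBs, so round up to the LEB size. */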
return roundup(size, ubi->leb_size);
}
/**
* new_fm_vbuf() - allocate a new volume header for fastmap usage.
* @ubi: UBI device description object
* @vol_id: the VID of the new header
*
 * Returns a new struct ubi_vid_io_buf on success.
* NULL indicates out of memory.
*/
static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
{
struct ubi_vid_io_buf *new;
struct ubi_vid_hdr *vh;
new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!new)
goto out;
vh = ubi_get_vid_hdr(new);
vh->vol_type = UBI_VID_DYNAMIC;
vh->vol_id = cpu_to_be32(vol_id);
/* UBI implementations without fastmap support have to delete the
* fastmap.
*/
vh->compat = UBI_COMPAT_DELETE;
out:
return new;
}
/**
 * add_aeb - create and add an attach erase block to a given list.
* @ai: UBI attach info object
* @list: the target list
* @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new LEB
* @scrub: scrub this PEB after attaching
*
* Returns 0 on success, < 0 indicates an internal error.
*/
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
int pnum, int ec, int scrub)
{
struct ubi_ainf_peb *aeb;
aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
aeb->lnum = -1;
aeb->scrub = scrub;
aeb->copy_flag = aeb->sqnum = 0;
ai->ec_sum += aeb->ec;
ai->ec_count++;
if (ai->max_ec < aeb->ec)
ai->max_ec = aeb->ec;
if (ai->min_ec > aeb->ec)
ai->min_ec = aeb->ec;
list_add_tail(&aeb->u.list, list);
return 0;
}
/**
* add_vol - create and add a new volume to ubi_attach_info.
* @ai: ubi_attach_info object
* @vol_id: VID of the new volume
* @used_ebs: number of used EBS
* @data_pad: data padding value of the new volume
* @vol_type: volume type
* @last_eb_bytes: number of bytes in the last LEB
*
* Returns the new struct ubi_ainf_volume on success.
* NULL indicates an error.
*/
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
int used_ebs, int data_pad, u8 vol_type,
int last_eb_bytes)
{
struct ubi_ainf_volume *av;
av = ubi_add_av(ai, vol_id);
if (IS_ERR(av))
return av;
av->data_pad = data_pad;
av->last_data_size = last_eb_bytes;
av->compat = 0;
av->vol_type = vol_type;
if (av->vol_type == UBI_STATIC_VOLUME)
av->used_ebs = used_ebs;
dbg_bld("found volume (ID %i)", vol_id);
return av;
}
/**
* assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
* @ai: ubi_attach_info object
 * @aeb: the SEB to be assigned
* @av: target scan volume
*/
static void assign_aeb_to_av(struct ubi_attach_info *ai,
struct ubi_ainf_peb *aeb,
struct ubi_ainf_volume *av)
{
struct ubi_ainf_peb *tmp_aeb;
struct rb_node **p = &av->root.rb_node, *parent = NULL;
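/*
 * Standard rbtree insertion walk, keyed by the logical eraseblock
 * number.
 */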
while (*p) {
parent = *p;
tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
if (aeb->lnum != tmp_aeb->lnum) {
if (aeb->lnum < tmp_aeb->lnum)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
continue;
} else
break;
}
list_del(&aeb->u.list);
av->leb_count++;
rb_link_node(&aeb->u.rb, parent, p);
rb_insert_color(&aeb->u.rb, &av->root);
}
/**
 * update_vol - inserts or updates a LEB which was found in a pool.
* @ubi: the UBI device object
* @ai: attach info object
* @av: the volume this LEB belongs to
* @new_vh: the volume header derived from new_aeb
* @new_aeb: the AEB to be examined
*
* Returns 0 on success, < 0 indicates an internal error.
*/
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
struct ubi_ainf_peb *new_aeb)
{
struct rb_node **p = &av->root.rb_node, *parent = NULL;
struct ubi_ainf_peb *aeb, *victim;
int cmp_res;
while (*p) {
parent = *p;
aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
continue;
}
/* This case can happen if the fastmap gets written
* because of a volume change (creation, deletion, ..).
* Then a PEB can be within the persistent EBA and the pool.
*/
if (aeb->pnum == new_aeb->pnum) {
ubi_assert(aeb->lnum == new_aeb->lnum);
ubi_free_aeb(ai, new_aeb);
return 0;
}
cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
if (cmp_res < 0)
return cmp_res;
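/*
 * ubi_compare_lebs() encodes its verdict as a bitmask; bit 0 set
 * means the copy described by @new_vh is the newer one.
 */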
/* new_aeb is newer */
if (cmp_res & 1) {
victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
if (!victim)
return -ENOMEM;
list_add_tail(&victim->u.list, &ai->erase);
if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
av->last_data_size =
be32_to_cpu(new_vh->data_size);
dbg_bld("vol %i: AEB %i's PEB %i is the newer",
av->vol_id, aeb->lnum, new_aeb->pnum);
aeb->ec = new_aeb->ec;
aeb->pnum = new_aeb->pnum;
aeb->copy_flag = new_vh->copy_flag;
aeb->scrub = new_aeb->scrub;
aeb->sqnum = new_aeb->sqnum;
ubi_free_aeb(ai, new_aeb);
/* new_aeb is older */
} else {
dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
av->vol_id, aeb->lnum, new_aeb->pnum);
list_add_tail(&new_aeb->u.list, &ai->erase);
}
return 0;
}
/* This LEB is new, let's add it to the volume */
if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
av->highest_lnum = be32_to_cpu(new_vh->lnum);
av->last_data_size = be32_to_cpu(new_vh->data_size);
}
if (av->vol_type == UBI_STATIC_VOLUME)
av->used_ebs = be32_to_cpu(new_vh->used_ebs);
av->leb_count++;
rb_link_node(&new_aeb->u.rb, parent, p);
rb_insert_color(&new_aeb->u.rb, &av->root);
return 0;
}
/**
* process_pool_aeb - we found a non-empty PEB in a pool.
* @ubi: UBI device object
* @ai: attach info object
* @new_vh: the volume header derived from new_aeb
* @new_aeb: the AEB to be examined
*
* Returns 0 on success, < 0 indicates an internal error.
*/
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
struct ubi_vid_hdr *new_vh,
struct ubi_ainf_peb *new_aeb)
{
int vol_id = be32_to_cpu(new_vh->vol_id);
struct ubi_ainf_volume *av;
if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
ubi_free_aeb(ai, new_aeb);
return 0;
}
/* Find the volume this SEB belongs to */
av = ubi_find_av(ai, vol_id);
if (!av) {
ubi_err(ubi, "orphaned volume in fastmap pool!");
ubi_free_aeb(ai, new_aeb);
return UBI_BAD_FASTMAP;
}
ubi_assert(vol_id == av->vol_id);
return update_vol(ubi, ai, av, new_vh, new_aeb);
}
/**
* unmap_peb - unmap a PEB.
* If fastmap detects a free PEB in the pool it has to check whether
* this PEB has been unmapped after writing the fastmap.
*
* @ai: UBI attach info object
* @pnum: The PEB to be unmapped
*/
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
struct ubi_ainf_volume *av;
struct rb_node *node, *node2;
struct ubi_ainf_peb *aeb;
ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
if (aeb->pnum == pnum) {
rb_erase(&aeb->u.rb, &av->root);
av->leb_count--;
ubi_free_aeb(ai, aeb);
return;
}
}
}
}
/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
* @ubi: UBI device object
* @ai: attach info object
 * @pebs: an array of all PEB numbers in the pool to be scanned
* @pool_size: size of the pool (number of entries in @pebs)
* @max_sqnum: pointer to the maximal sequence number
* @free: list of PEBs which are most likely free (and go into @ai->free)
*
* Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
* < 0 indicates an internal error.
*/
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
__be32 *pebs, int pool_size, unsigned long long *max_sqnum,
struct list_head *free)
{
struct ubi_vid_io_buf *vb;
struct ubi_vid_hdr *vh;
struct ubi_ec_hdr *ech;
struct ubi_ainf_peb *new_aeb;
int i, pnum, err, ret = 0;
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
return -ENOMEM;
vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!vb) {
kfree(ech);
return -ENOMEM;
}
vh = ubi_get_vid_hdr(vb);
dbg_bld("scanning fastmap pool: size = %i", pool_size);
/*
* Now scan all PEBs in the pool to find changes which have been made
* after the creation of the fastmap
*/
for (i = 0; i < pool_size; i++) {
int scrub = 0;
int image_seq;
pnum = be32_to_cpu(pebs[i]);
if (ubi_io_is_bad(ubi, pnum)) {
ubi_err(ubi, "bad PEB in fastmap pool!");
ret = UBI_BAD_FASTMAP;
goto out;
}
err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
if (err && err != UBI_IO_BITFLIPS) {
ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
pnum, err);
ret = err > 0 ? UBI_BAD_FASTMAP : err;
goto out;
} else if (err == UBI_IO_BITFLIPS)
scrub = 1;
/*
* Older UBI implementations have image_seq set to zero, so
* we shouldn't fail if image_seq == 0.
*/
image_seq = be32_to_cpu(ech->image_seq);
if (image_seq && (image_seq != ubi->image_seq)) {
ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
be32_to_cpu(ech->image_seq), ubi->image_seq);
ret = UBI_BAD_FASTMAP;
goto out;
}
err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
unsigned long long ec = be64_to_cpu(ech->ec);
unmap_peb(ai, pnum);
dbg_bld("Adding PEB to free: %i", pnum);
if (err == UBI_IO_FF_BITFLIPS)
scrub = 1;
ret = add_aeb(ai, free, pnum, ec, scrub);
if (ret)
goto out;
continue;
} else if (err == 0 || err == UBI_IO_BITFLIPS) {
dbg_bld("Found non empty PEB:%i in pool", pnum);
if (err == UBI_IO_BITFLIPS)
scrub = 1;
new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
if (!new_aeb) {
ret = -ENOMEM;
goto out;
}
new_aeb->lnum = be32_to_cpu(vh->lnum);
new_aeb->sqnum = be64_to_cpu(vh->sqnum);
new_aeb->copy_flag = vh->copy_flag;
new_aeb->scrub = scrub;
if (*max_sqnum < new_aeb->sqnum)
*max_sqnum = new_aeb->sqnum;
err = process_pool_aeb(ubi, ai, vh, new_aeb);
if (err) {
ret = err > 0 ? UBI_BAD_FASTMAP : err;
goto out;
}
} else {
/* We are paranoid and fall back to scanning mode */
ubi_err(ubi, "fastmap pool PEBs contains damaged PEBs!");
ret = err > 0 ? UBI_BAD_FASTMAP : err;
goto out;
}
}
out:
ubi_free_vid_buf(vb);
kfree(ech);
return ret;
}
/**
* count_fastmap_pebs - Counts the PEBs found by fastmap.
* @ai: The UBI attach info object
*/
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
struct ubi_ainf_peb *aeb;
struct ubi_ainf_volume *av;
struct rb_node *rb1, *rb2;
int n = 0;
list_for_each_entry(aeb, &ai->erase, u.list)
n++;
list_for_each_entry(aeb, &ai->free, u.list)
n++;
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
n++;
return n;
}
/**
* ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
* @ubi: UBI device object
* @ai: UBI attach info object
* @fm: the fastmap to be attached
*
* Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
* < 0 indicates an internal error.
*/
static int ubi_attach_fastmap(struct ubi_device *ubi,
struct ubi_attach_info *ai,
struct ubi_fastmap_layout *fm)
{
struct list_head used, free;
struct ubi_ainf_volume *av;
struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
struct ubi_fm_sb *fmsb;
struct ubi_fm_hdr *fmhdr;
struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
struct ubi_fm_ec *fmec;
struct ubi_fm_volhdr *fmvhdr;
struct ubi_fm_eba *fm_eba;
int ret, i, j, pool_size, wl_pool_size;
size_t fm_pos = 0, fm_size = ubi->fm_size;
unsigned long long max_sqnum = 0;
void *fm_raw = ubi->fm_buf;
INIT_LIST_HEAD(&used);
INIT_LIST_HEAD(&free);
ai->min_ec = UBI_MAX_ERASECOUNTER;
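/*
 * Parse the fastmap in its on-flash order: super block, header,
 * the two pools, the per-PEB EC lists and finally the per-volume
 * headers with their EBA tables.
 */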
fmsb = (struct ubi_fm_sb *)(fm_raw);
ai->max_sqnum = fmsb->sqnum;
fm_pos += sizeof(struct ubi_fm_sb);
if (fm_pos >= fm_size)
goto fail_bad;
fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmhdr);
if (fm_pos >= fm_size)
goto fail_bad;
if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
goto fail_bad;
}
fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmpl);
if (fm_pos >= fm_size)
goto fail_bad;
if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
goto fail_bad;
}
fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmpl_wl);
if (fm_pos >= fm_size)
goto fail_bad;
if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
goto fail_bad;
}
pool_size = be16_to_cpu(fmpl->size);
wl_pool_size = be16_to_cpu(fmpl_wl->size);
fm->max_pool_size = be16_to_cpu(fmpl->max_size);
fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
ubi_err(ubi, "bad pool size: %i", pool_size);
goto fail_bad;
}
if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
goto fail_bad;
}
if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
fm->max_pool_size < 0) {
ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
goto fail_bad;
}
if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
fm->max_wl_pool_size < 0) {
ubi_err(ubi, "bad maximal WL pool size: %i",
fm->max_wl_pool_size);
goto fail_bad;
}
/* read EC values from free list */
for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmec);
if (fm_pos >= fm_size)
goto fail_bad;
ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
be32_to_cpu(fmec->ec), 0);
if (ret)
goto fail;
}
/* read EC values from used list */
for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmec);
if (fm_pos >= fm_size)
goto fail_bad;
ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
be32_to_cpu(fmec->ec), 0);
if (ret)
goto fail;
}
/* read EC values from scrub list */
for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmec);
if (fm_pos >= fm_size)
goto fail_bad;
ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
be32_to_cpu(fmec->ec), 1);
if (ret)
goto fail;
}
/* read EC values from erase list */
for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmec);
if (fm_pos >= fm_size)
goto fail_bad;
ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
be32_to_cpu(fmec->ec), 1);
if (ret)
goto fail;
}
ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
/* Iterate over all volumes and read their EBA table */
for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmvhdr);
if (fm_pos >= fm_size)
goto fail_bad;
if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
goto fail_bad;
}
av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
be32_to_cpu(fmvhdr->used_ebs),
be32_to_cpu(fmvhdr->data_pad),
fmvhdr->vol_type,
be32_to_cpu(fmvhdr->last_eb_bytes));
if (IS_ERR(av)) {
if (PTR_ERR(av) == -EEXIST)
ubi_err(ubi, "volume (ID %i) already exists",
fmvhdr->vol_id);
goto fail_bad;
}
ai->vols_found++;
if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
fm_pos += sizeof(*fm_eba);
fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
if (fm_pos >= fm_size)
goto fail_bad;
if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
goto fail_bad;
}
for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
int pnum = be32_to_cpu(fm_eba->pnum[j]);
if (pnum < 0)
continue;
aeb = NULL;
list_for_each_entry(tmp_aeb, &used, u.list) {
if (tmp_aeb->pnum == pnum) {
aeb = tmp_aeb;
break;
}
}
if (!aeb) {
ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
goto fail_bad;
}
aeb->lnum = j;
if (av->highest_lnum <= aeb->lnum)
av->highest_lnum = aeb->lnum;
assign_aeb_to_av(ai, aeb, av);
dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
aeb->pnum, aeb->lnum, av->vol_id);
}
}
ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
if (ret)
goto fail;
ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
if (ret)
goto fail;
if (max_sqnum > ai->max_sqnum)
ai->max_sqnum = max_sqnum;
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
list_move_tail(&tmp_aeb->u.list, &ai->free);
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
list_move_tail(&tmp_aeb->u.list, &ai->erase);
ubi_assert(list_empty(&free));
/*
* If fastmap is leaking PEBs (must not happen), raise a
* fat warning and fall back to scanning mode.
* We do this here because in ubi_wl_init() it's too late
* and we cannot fall back to scanning.
*/
if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
ai->bad_peb_count - fm->used_blocks))
goto fail_bad;
return 0;
fail_bad:
ret = UBI_BAD_FASTMAP;
fail:
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
list_del(&tmp_aeb->u.list);
ubi_free_aeb(ai, tmp_aeb);
}
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
list_del(&tmp_aeb->u.list);
ubi_free_aeb(ai, tmp_aeb);
}
return ret;
}
/**
* find_fm_anchor - find the most recent Fastmap superblock (anchor)
* @ai: UBI attach info to be filled
*/
static int find_fm_anchor(struct ubi_attach_info *ai)
{
int ret = -1;
struct ubi_ainf_peb *aeb;
unsigned long long max_sqnum = 0;
list_for_each_entry(aeb, &ai->fastmap, u.list) {
if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
max_sqnum = aeb->sqnum;
ret = aeb->pnum;
}
}
return ret;
}
static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
struct ubi_ainf_peb *old)
{
struct ubi_ainf_peb *new;
new = ubi_alloc_aeb(ai, old->pnum, old->ec);
if (!new)
return NULL;
new->vol_id = old->vol_id;
new->sqnum = old->sqnum;
new->lnum = old->lnum;
new->scrub = old->scrub;
new->copy_flag = old->copy_flag;
return new;
}
/**
* ubi_scan_fastmap - scan the fastmap.
* @ubi: UBI device object
* @ai: UBI attach info to be filled
* @scan_ai: UBI attach info from the first 64 PEBs,
* used to find the most recent Fastmap data structure
*
* Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
* UBI_BAD_FASTMAP if one was found but is not usable.
* < 0 indicates an internal error.
*/
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
struct ubi_attach_info *scan_ai)
{
struct ubi_fm_sb *fmsb, *fmsb2;
struct ubi_vid_io_buf *vb;
struct ubi_vid_hdr *vh;
struct ubi_ec_hdr *ech;
struct ubi_fastmap_layout *fm;
struct ubi_ainf_peb *aeb;
int i, used_blocks, pnum, fm_anchor, ret = 0;
size_t fm_size;
__be32 crc, tmp_crc;
unsigned long long sqnum = 0;
fm_anchor = find_fm_anchor(scan_ai);
if (fm_anchor < 0)
return UBI_NO_FASTMAP;
/* Copy all (possible) fastmap blocks into our new attach structure. */
list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
struct ubi_ainf_peb *new;
new = clone_aeb(ai, aeb);
if (!new)
return -ENOMEM;
list_add(&new->u.list, &ai->fastmap);
}
down_write(&ubi->fm_protect);
memset(ubi->fm_buf, 0, ubi->fm_size);
fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
if (!fmsb) {
ret = -ENOMEM;
goto out;
}
fm = kzalloc(sizeof(*fm), GFP_KERNEL);
if (!fm) {
ret = -ENOMEM;
kfree(fmsb);
goto out;
}
ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
if (ret && ret != UBI_IO_BITFLIPS)
goto free_fm_sb;
else if (ret == UBI_IO_BITFLIPS)
fm->to_be_tortured[0] = 1;
if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
ret = UBI_BAD_FASTMAP;
goto free_fm_sb;
}
if (fmsb->version != UBI_FM_FMT_VERSION) {
ubi_err(ubi, "bad fastmap version: %i, expected: %i",
fmsb->version, UBI_FM_FMT_VERSION);
ret = UBI_BAD_FASTMAP;
goto free_fm_sb;
}
used_blocks = be32_to_cpu(fmsb->used_blocks);
if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
ubi_err(ubi, "number of fastmap blocks is invalid: %i",
used_blocks);
ret = UBI_BAD_FASTMAP;
goto free_fm_sb;
}
fm_size = ubi->leb_size * used_blocks;
if (fm_size != ubi->fm_size) {
ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
fm_size, ubi->fm_size);
ret = UBI_BAD_FASTMAP;
goto free_fm_sb;
}
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech) {
ret = -ENOMEM;
goto free_fm_sb;
}
vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!vb) {
ret = -ENOMEM;
goto free_hdr;
}
vh = ubi_get_vid_hdr(vb);
for (i = 0; i < used_blocks; i++) {
int image_seq;
pnum = be32_to_cpu(fmsb->block_loc[i]);
if (ubi_io_is_bad(ubi, pnum)) {
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
if (i == 0 && pnum != fm_anchor) {
ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
pnum, fm_anchor);
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
i, pnum);
if (ret > 0)
ret = UBI_BAD_FASTMAP;
goto free_hdr;
} else if (ret == UBI_IO_BITFLIPS)
fm->to_be_tortured[i] = 1;
image_seq = be32_to_cpu(ech->image_seq);
if (!ubi->image_seq)
ubi->image_seq = image_seq;
/*
* Older UBI implementations have image_seq set to zero, so
* we shouldn't fail if image_seq == 0.
*/
if (image_seq && (image_seq != ubi->image_seq)) {
ubi_err(ubi, "wrong image seq:%d instead of %d",
be32_to_cpu(ech->image_seq), ubi->image_seq);
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
i, pnum);
goto free_hdr;
}
if (i == 0) {
if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
be32_to_cpu(vh->vol_id),
UBI_FM_SB_VOLUME_ID);
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
} else {
if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
be32_to_cpu(vh->vol_id),
UBI_FM_DATA_VOLUME_ID);
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
}
if (sqnum < be64_to_cpu(vh->sqnum))
sqnum = be64_to_cpu(vh->sqnum);
ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
pnum, 0, ubi->leb_size);
if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
"err: %i)", i, pnum, ret);
goto free_hdr;
}
}
kfree(fmsb);
fmsb = NULL;
fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
tmp_crc = be32_to_cpu(fmsb2->data_crc);
fmsb2->data_crc = 0;
crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
if (crc != tmp_crc) {
ubi_err(ubi, "fastmap data CRC is invalid");
ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
tmp_crc, crc);
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
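/*
 * Store the highest sequence number seen in the fastmap blocks; it
 * was deliberately left zero at write time (see ubi_write_fastmap()).
 */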
fmsb2->sqnum = sqnum;
fm->used_blocks = used_blocks;
ret = ubi_attach_fastmap(ubi, ai, fm);
if (ret) {
if (ret > 0)
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
for (i = 0; i < used_blocks; i++) {
struct ubi_wl_entry *e;
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e) {
while (i--)
kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
ret = -ENOMEM;
goto free_hdr;
}
e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
e->ec = be32_to_cpu(fmsb2->block_ec[i]);
fm->e[i] = e;
}
ubi->fm = fm;
ubi->fm_pool.max_size = ubi->fm->max_pool_size;
ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
ubi_msg(ubi, "attached by fastmap");
ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
ubi_msg(ubi, "fastmap WL pool size: %d",
ubi->fm_wl_pool.max_size);
ubi->fm_disabled = 0;
ubi->fast_attach = 1;
ubi_free_vid_buf(vb);
kfree(ech);
out:
up_write(&ubi->fm_protect);
if (ret == UBI_BAD_FASTMAP)
ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
return ret;
free_hdr:
ubi_free_vid_buf(vb);
kfree(ech);
free_fm_sb:
kfree(fmsb);
kfree(fm);
goto out;
}
int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
{
struct ubi_device *ubi = vol->ubi;
if (!ubi->fast_attach)
return 0;
vol->checkmap = bitmap_zalloc(leb_count, GFP_KERNEL);
if (!vol->checkmap)
return -ENOMEM;
return 0;
}
void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
{
bitmap_free(vol->checkmap);
}
/**
* ubi_write_fastmap - writes a fastmap.
* @ubi: UBI device object
 * @new_fm: the fastmap to be written
*
* Returns 0 on success, < 0 indicates an internal error.
*/
static int ubi_write_fastmap(struct ubi_device *ubi,
struct ubi_fastmap_layout *new_fm)
{
size_t fm_pos = 0;
void *fm_raw;
struct ubi_fm_sb *fmsb;
struct ubi_fm_hdr *fmh;
struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
struct ubi_fm_ec *fec;
struct ubi_fm_volhdr *fvh;
struct ubi_fm_eba *feba;
struct ubi_wl_entry *wl_e;
struct ubi_volume *vol;
struct ubi_vid_io_buf *avbuf, *dvbuf;
struct ubi_vid_hdr *avhdr, *dvhdr;
struct ubi_work *ubi_wrk;
struct rb_node *tmp_rb;
int ret, i, j, free_peb_count, used_peb_count, vol_count;
int scrub_peb_count, erase_peb_count;
unsigned long *seen_pebs;
fm_raw = ubi->fm_buf;
memset(ubi->fm_buf, 0, ubi->fm_size);
avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
if (!avbuf) {
ret = -ENOMEM;
goto out;
}
dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
if (!dvbuf) {
ret = -ENOMEM;
goto out_free_avbuf;
}
avhdr = ubi_get_vid_hdr(avbuf);
dvhdr = ubi_get_vid_hdr(dvbuf);
seen_pebs = init_seen(ubi);
if (IS_ERR(seen_pebs)) {
ret = PTR_ERR(seen_pebs);
goto out_free_dvbuf;
}
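/*
 * Hold both locks so that neither the volume set nor the WL state
 * can change while the fastmap is assembled in memory.
 */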
spin_lock(&ubi->volumes_lock);
spin_lock(&ubi->wl_lock);
fmsb = (struct ubi_fm_sb *)fm_raw;
fm_pos += sizeof(*fmsb);
ubi_assert(fm_pos <= ubi->fm_size);
fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmh);
ubi_assert(fm_pos <= ubi->fm_size);
fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
fmsb->version = UBI_FM_FMT_VERSION;
fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
/* the max sqnum will be filled in while *reading* the fastmap */
fmsb->sqnum = 0;
fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
free_peb_count = 0;
used_peb_count = 0;
scrub_peb_count = 0;
erase_peb_count = 0;
vol_count = 0;
fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmpl);
fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
fmpl->size = cpu_to_be16(ubi->fm_pool.size);
fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
for (i = 0; i < ubi->fm_pool.size; i++) {
fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
}
fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmpl_wl);
fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
for (i = 0; i < ubi->fm_wl_pool.size; i++) {
fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
}
ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
set_seen(ubi, wl_e->pnum, seen_pebs);
fec->ec = cpu_to_be32(wl_e->ec);
free_peb_count++;
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
fmh->free_peb_count = cpu_to_be32(free_peb_count);
ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
set_seen(ubi, wl_e->pnum, seen_pebs);
fec->ec = cpu_to_be32(wl_e->ec);
used_peb_count++;
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
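/*
 * PEBs under wear-leveling protection are still logically in use,
 * so account them in the used list as well.
 */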
ubi_for_each_protected_peb(ubi, i, wl_e) {
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
set_seen(ubi, wl_e->pnum, seen_pebs);
fec->ec = cpu_to_be32(wl_e->ec);
used_peb_count++;
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
fmh->used_peb_count = cpu_to_be32(used_peb_count);
ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
set_seen(ubi, wl_e->pnum, seen_pebs);
fec->ec = cpu_to_be32(wl_e->ec);
scrub_peb_count++;
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
list_for_each_entry(ubi_wrk, &ubi->works, list) {
if (ubi_is_erase_work(ubi_wrk)) {
wl_e = ubi_wrk->e;
ubi_assert(wl_e);
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
set_seen(ubi, wl_e->pnum, seen_pebs);
fec->ec = cpu_to_be32(wl_e->ec);
erase_peb_count++;
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
}
fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
vol = ubi->volumes[i];
if (!vol)
continue;
vol_count++;
fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
fm_pos += sizeof(*fvh);
ubi_assert(fm_pos <= ubi->fm_size);
fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
fvh->vol_id = cpu_to_be32(vol->vol_id);
fvh->vol_type = vol->vol_type;
fvh->used_ebs = cpu_to_be32(vol->used_ebs);
fvh->data_pad = cpu_to_be32(vol->data_pad);
fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
vol->vol_type == UBI_STATIC_VOLUME);
feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
ubi_assert(fm_pos <= ubi->fm_size);
for (j = 0; j < vol->reserved_pebs; j++) {
struct ubi_eba_leb_desc ldesc;
ubi_eba_get_ldesc(vol, j, &ldesc);
feba->pnum[j] = cpu_to_be32(ldesc.pnum);
}
feba->reserved_pebs = cpu_to_be32(j);
feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
}
fmh->vol_count = cpu_to_be32(vol_count);
fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
avhdr->lnum = 0;
spin_unlock(&ubi->wl_lock);
spin_unlock(&ubi->volumes_lock);
dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
if (ret) {
ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
goto out_free_seen;
}
for (i = 0; i < new_fm->used_blocks; i++) {
fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
}
fmsb->data_crc = 0;
fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
ubi->fm_size));
for (i = 1; i < new_fm->used_blocks; i++) {
dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
dvhdr->lnum = cpu_to_be32(i);
dbg_bld("writing fastmap data to PEB %i sqnum %llu",
new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
if (ret) {
ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
new_fm->e[i]->pnum);
goto out_free_seen;
}
}
for (i = 0; i < new_fm->used_blocks; i++) {
ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
new_fm->e[i]->pnum, 0, ubi->leb_size);
if (ret) {
ubi_err(ubi, "unable to write fastmap to PEB %i!",
new_fm->e[i]->pnum);
goto out_free_seen;
}
}
ubi_assert(new_fm);
ubi->fm = new_fm;
ret = self_check_seen(ubi, seen_pebs);
dbg_bld("fastmap written!");
out_free_seen:
free_seen(seen_pebs);
out_free_dvbuf:
ubi_free_vid_buf(dvbuf);
out_free_avbuf:
ubi_free_vid_buf(avbuf);
out:
return ret;
}
/**
* erase_block - Manually erase a PEB.
* @ubi: UBI device object
* @pnum: PEB to be erased
*
* Returns the new EC value on success, < 0 indicates an internal error.
*/
static int erase_block(struct ubi_device *ubi, int pnum)
{
int ret;
struct ubi_ec_hdr *ec_hdr;
long long ec;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ec_hdr)
return -ENOMEM;
ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
if (ret < 0)
goto out;
else if (ret && ret != UBI_IO_BITFLIPS) {
ret = -EINVAL;
goto out;
}
ret = ubi_io_sync_erase(ubi, pnum, 0);
if (ret < 0)
goto out;
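/*
 * ubi_io_sync_erase() returns the number of erasures performed;
 * add them to the erase counter read from the old EC header.
 */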
ec = be64_to_cpu(ec_hdr->ec);
ec += ret;
if (ec > UBI_MAX_ERASECOUNTER) {
ret = -EINVAL;
goto out;
}
ec_hdr->ec = cpu_to_be64(ec);
ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
if (ret < 0)
goto out;
ret = ec;
out:
kfree(ec_hdr);
return ret;
}
/**
* invalidate_fastmap - destroys a fastmap.
* @ubi: UBI device object
*
* This function ensures that upon next UBI attach a full scan
* is issued. We need this if UBI is about to write a new fastmap
* but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue, or b) fall back to RO mode to have the
* current fastmap in a valid state.
* Returns 0 on success, < 0 indicates an internal error.
*/
static int invalidate_fastmap(struct ubi_device *ubi)
{
int ret;
struct ubi_fastmap_layout *fm;
struct ubi_wl_entry *e;
struct ubi_vid_io_buf *vb = NULL;
struct ubi_vid_hdr *vh;
if (!ubi->fm)
return 0;
ubi->fm = NULL;
ret = -ENOMEM;
fm = kzalloc(sizeof(*fm), GFP_KERNEL);
if (!fm)
goto out;
vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
if (!vb)
goto out_free_fm;
vh = ubi_get_vid_hdr(vb);
ret = -ENOSPC;
e = ubi_wl_get_fm_peb(ubi, 1);
if (!e)
goto out_free_fm;
/*
 * Create a fake fastmap such that UBI will fall back
* to scanning mode.
*/
vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
if (ret < 0) {
ubi_wl_put_fm_peb(ubi, e, 0, 0);
goto out_free_fm;
}
fm->used_blocks = 1;
fm->e[0] = e;
ubi->fm = fm;
out:
ubi_free_vid_buf(vb);
return ret;
out_free_fm:
kfree(fm);
goto out;
}
/**
* return_fm_pebs - returns all PEBs used by a fastmap back to the
* WL sub-system.
* @ubi: UBI device object
* @fm: fastmap layout object
*/
static void return_fm_pebs(struct ubi_device *ubi,
struct ubi_fastmap_layout *fm)
{
int i;
if (!fm)
return;
for (i = 0; i < fm->used_blocks; i++) {
if (fm->e[i]) {
ubi_wl_put_fm_peb(ubi, fm->e[i], i,
fm->to_be_tortured[i]);
fm->e[i] = NULL;
}
}
}
/**
* ubi_update_fastmap - will be called by UBI if a volume changes or
* a fastmap pool becomes full.
* @ubi: UBI device object
*
* Returns 0 on success, < 0 indicates an internal error.
*/
int ubi_update_fastmap(struct ubi_device *ubi)
{
int ret, i, j;
struct ubi_fastmap_layout *new_fm, *old_fm;
struct ubi_wl_entry *tmp_e;
down_write(&ubi->fm_protect);
down_write(&ubi->work_sem);
down_write(&ubi->fm_eba_sem);
ubi_refill_pools(ubi);
if (ubi->ro_mode || ubi->fm_disabled) {
up_write(&ubi->fm_eba_sem);
up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
return 0;
}
new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
if (!new_fm) {
up_write(&ubi->fm_eba_sem);
up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
return -ENOMEM;
}
new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
old_fm = ubi->fm;
ubi->fm = NULL;
if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
ubi_err(ubi, "fastmap too large");
ret = -ENOSPC;
goto err;
}
for (i = 1; i < new_fm->used_blocks; i++) {
spin_lock(&ubi->wl_lock);
tmp_e = ubi_wl_get_fm_peb(ubi, 0);
spin_unlock(&ubi->wl_lock);
if (!tmp_e) {
if (old_fm && old_fm->e[i]) {
ret = erase_block(ubi, old_fm->e[i]->pnum);
if (ret < 0) {
ubi_err(ubi, "could not erase old fastmap PEB");
for (j = 1; j < i; j++) {
ubi_wl_put_fm_peb(ubi, new_fm->e[j],
j, 0);
new_fm->e[j] = NULL;
}
goto err;
}
new_fm->e[i] = old_fm->e[i];
old_fm->e[i] = NULL;
} else {
ubi_err(ubi, "could not get any free erase block");
for (j = 1; j < i; j++) {
ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
new_fm->e[j] = NULL;
}
ret = -ENOSPC;
goto err;
}
} else {
new_fm->e[i] = tmp_e;
if (old_fm && old_fm->e[i]) {
ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
old_fm->to_be_tortured[i]);
old_fm->e[i] = NULL;
}
}
}
/* Old fastmap is larger than the new one */
if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
old_fm->to_be_tortured[i]);
old_fm->e[i] = NULL;
}
}
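/*
 * Claim the anchor PEB which the wear-leveling sub-system reserved
 * in advance for the fastmap super block.
 */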
spin_lock(&ubi->wl_lock);
tmp_e = ubi->fm_anchor;
ubi->fm_anchor = NULL;
spin_unlock(&ubi->wl_lock);
if (old_fm) {
/* no fresh anchor PEB was found, reuse the old one */
if (!tmp_e) {
ret = erase_block(ubi, old_fm->e[0]->pnum);
if (ret < 0) {
ubi_err(ubi, "could not erase old anchor PEB");
for (i = 1; i < new_fm->used_blocks; i++) {
ubi_wl_put_fm_peb(ubi, new_fm->e[i],
i, 0);
new_fm->e[i] = NULL;
}
goto err;
}
new_fm->e[0] = old_fm->e[0];
new_fm->e[0]->ec = ret;
old_fm->e[0] = NULL;
} else {
/* we've got a new anchor PEB, return the old one */
ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
old_fm->to_be_tortured[0]);
new_fm->e[0] = tmp_e;
old_fm->e[0] = NULL;
}
} else {
if (!tmp_e) {
ubi_err(ubi, "could not find any anchor PEB");
for (i = 1; i < new_fm->used_blocks; i++) {
ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
new_fm->e[i] = NULL;
}
ret = -ENOSPC;
goto err;
}
new_fm->e[0] = tmp_e;
}
ret = ubi_write_fastmap(ubi, new_fm);
if (ret)
goto err;
out_unlock:
up_write(&ubi->fm_eba_sem);
up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
kfree(old_fm);
ubi_ensure_anchor_pebs(ubi);
return ret;
err:
ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
ret = invalidate_fastmap(ubi);
if (ret < 0) {
ubi_err(ubi, "Unable to invalidate current fastmap!");
ubi_ro_mode(ubi);
} else {
return_fm_pebs(ubi, old_fm);
return_fm_pebs(ubi, new_fm);
ret = 0;
}
kfree(new_fm);
goto out_unlock;
}
| linux-master | drivers/mtd/ubi/fastmap.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) International Business Machines Corp., 2006
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/*
* This file contains implementation of volume creation, deletion, updating and
* resizing.
*/
#include <linux/err.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "ubi.h"
static int self_check_volumes(struct ubi_device *ubi);
static ssize_t vol_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf);
/* Device attributes corresponding to files in '/<sysfs>/class/ubi/ubiX_Y' */
static struct device_attribute attr_vol_reserved_ebs =
__ATTR(reserved_ebs, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute attr_vol_type =
__ATTR(type, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute attr_vol_name =
__ATTR(name, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute attr_vol_corrupted =
__ATTR(corrupted, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute attr_vol_alignment =
__ATTR(alignment, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute attr_vol_usable_eb_size =
__ATTR(usable_eb_size, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute attr_vol_data_bytes =
__ATTR(data_bytes, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute attr_vol_upd_marker =
__ATTR(upd_marker, S_IRUGO, vol_attribute_show, NULL);
/*
* "Show" method for files in '/<sysfs>/class/ubi/ubiX_Y/'.
*
* Consider a situation:
* A. process 1 opens a sysfs file related to volume Y, say
* /<sysfs>/class/ubi/ubiX_Y/reserved_ebs;
* B. process 2 removes volume Y;
* C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file;
*
* In this situation, this function will return %-ENODEV because it will find
* out that the volume was removed from the @ubi->volumes array.
*/
static ssize_t vol_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
struct ubi_device *ubi = vol->ubi;
spin_lock(&ubi->volumes_lock);
if (!ubi->volumes[vol->vol_id]) {
spin_unlock(&ubi->volumes_lock);
return -ENODEV;
}
/* Take a reference to prevent volume removal */
vol->ref_count += 1;
spin_unlock(&ubi->volumes_lock);
if (attr == &attr_vol_reserved_ebs)
ret = sprintf(buf, "%d\n", vol->reserved_pebs);
else if (attr == &attr_vol_type) {
const char *tp;
if (vol->vol_type == UBI_DYNAMIC_VOLUME)
tp = "dynamic";
else
tp = "static";
ret = sprintf(buf, "%s\n", tp);
} else if (attr == &attr_vol_name)
ret = sprintf(buf, "%s\n", vol->name);
else if (attr == &attr_vol_corrupted)
ret = sprintf(buf, "%d\n", vol->corrupted);
else if (attr == &attr_vol_alignment)
ret = sprintf(buf, "%d\n", vol->alignment);
else if (attr == &attr_vol_usable_eb_size)
ret = sprintf(buf, "%d\n", vol->usable_leb_size);
else if (attr == &attr_vol_data_bytes)
ret = sprintf(buf, "%lld\n", vol->used_bytes);
else if (attr == &attr_vol_upd_marker)
ret = sprintf(buf, "%d\n", vol->upd_marker);
else
/* This must be a bug */
ret = -EINVAL;
/* We've done the operation, drop volume and UBI device references */
spin_lock(&ubi->volumes_lock);
vol->ref_count -= 1;
ubi_assert(vol->ref_count >= 0);
spin_unlock(&ubi->volumes_lock);
return ret;
}
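/*
 * Example (a sketch, not part of the driver): these attributes surface as
 * plain text files in sysfs, so a hypothetical userspace reader could look
 * like the snippet below. The "ubi0_0" path assumes UBI device 0, volume 0.
 *
 *	int fd = open("/sys/class/ubi/ubi0_0/type", O_RDONLY);
 *	char buf[16];
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);	// e.g. "dynamic\n"
 *
 *	if (n > 0)
 *		buf[n] = '\0';
 *	close(fd);
 */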
static struct attribute *volume_dev_attrs[] = {
&attr_vol_reserved_ebs.attr,
&attr_vol_type.attr,
&attr_vol_name.attr,
&attr_vol_corrupted.attr,
&attr_vol_alignment.attr,
&attr_vol_usable_eb_size.attr,
&attr_vol_data_bytes.attr,
&attr_vol_upd_marker.attr,
NULL
};
ATTRIBUTE_GROUPS(volume_dev);
/* Release method for volume devices */
static void vol_release(struct device *dev)
{
struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
ubi_eba_replace_table(vol, NULL);
ubi_fastmap_destroy_checkmap(vol);
kfree(vol);
}
/**
* ubi_create_volume - create volume.
* @ubi: UBI device description object
* @req: volume creation request
*
 * This function creates a volume described by @req. If @req->vol_id is
 * %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new
 * volume and saves it in @req->vol_id. Returns zero in case of success and a negative
* error code in case of failure. Note, the caller has to have the
* @ubi->device_mutex locked.
*/
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
{
int i, err, vol_id = req->vol_id;
struct ubi_volume *vol;
struct ubi_vtbl_record vtbl_rec;
struct ubi_eba_table *eba_tbl = NULL;
if (ubi->ro_mode)
return -EROFS;
vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
if (!vol)
return -ENOMEM;
device_initialize(&vol->dev);
vol->dev.release = vol_release;
vol->dev.parent = &ubi->dev;
vol->dev.class = &ubi_class;
vol->dev.groups = volume_dev_groups;
if (req->flags & UBI_VOL_SKIP_CRC_CHECK_FLG)
vol->skip_check = 1;
spin_lock(&ubi->volumes_lock);
if (vol_id == UBI_VOL_NUM_AUTO) {
/* Find unused volume ID */
dbg_gen("search for vacant volume ID");
for (i = 0; i < ubi->vtbl_slots; i++)
if (!ubi->volumes[i]) {
vol_id = i;
break;
}
if (vol_id == UBI_VOL_NUM_AUTO) {
ubi_err(ubi, "out of volume IDs");
err = -ENFILE;
goto out_unlock;
}
req->vol_id = vol_id;
}
dbg_gen("create device %d, volume %d, %llu bytes, type %d, name %s",
ubi->ubi_num, vol_id, (unsigned long long)req->bytes,
(int)req->vol_type, req->name);
/* Ensure that this volume does not exist */
err = -EEXIST;
if (ubi->volumes[vol_id]) {
ubi_err(ubi, "volume %d already exists", vol_id);
goto out_unlock;
}
/* Ensure that the name is unique */
for (i = 0; i < ubi->vtbl_slots; i++)
if (ubi->volumes[i] &&
ubi->volumes[i]->name_len == req->name_len &&
!strcmp(ubi->volumes[i]->name, req->name)) {
ubi_err(ubi, "volume \"%s\" exists (ID %d)",
req->name, i);
goto out_unlock;
}
/* Calculate how many eraseblocks are requested */
vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
vol->reserved_pebs = div_u64(req->bytes + vol->usable_leb_size - 1,
vol->usable_leb_size);
/* Reserve physical eraseblocks */
if (vol->reserved_pebs > ubi->avail_pebs) {
ubi_err(ubi, "not enough PEBs, only %d available",
ubi->avail_pebs);
if (ubi->corr_peb_count)
ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
err = -ENOSPC;
goto out_unlock;
}
ubi->avail_pebs -= vol->reserved_pebs;
ubi->rsvd_pebs += vol->reserved_pebs;
spin_unlock(&ubi->volumes_lock);
vol->vol_id = vol_id;
vol->alignment = req->alignment;
vol->data_pad = ubi->leb_size % vol->alignment;
vol->vol_type = req->vol_type;
vol->name_len = req->name_len;
memcpy(vol->name, req->name, vol->name_len);
vol->ubi = ubi;
/*
* Finish all pending erases because there may be some LEBs belonging
* to the same volume ID.
*/
err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
if (err)
goto out_acc;
eba_tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
if (IS_ERR(eba_tbl)) {
err = PTR_ERR(eba_tbl);
goto out_acc;
}
ubi_eba_replace_table(vol, eba_tbl);
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = vol->reserved_pebs;
vol->last_eb_bytes = vol->usable_leb_size;
vol->used_bytes =
(long long)vol->used_ebs * vol->usable_leb_size;
} else {
vol->used_ebs = div_u64_rem(vol->used_bytes,
vol->usable_leb_size,
&vol->last_eb_bytes);
if (vol->last_eb_bytes != 0)
vol->used_ebs += 1;
else
vol->last_eb_bytes = vol->usable_leb_size;
}
/* Make volume "available" before it becomes accessible via sysfs */
spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = vol;
ubi->vol_count += 1;
spin_unlock(&ubi->volumes_lock);
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
vol->dev.devt = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
err = cdev_device_add(&vol->cdev, &vol->dev);
if (err) {
ubi_err(ubi, "cannot add device");
goto out_mapping;
}
/* Fill volume table record */
memset(&vtbl_rec, 0, sizeof(struct ubi_vtbl_record));
vtbl_rec.reserved_pebs = cpu_to_be32(vol->reserved_pebs);
vtbl_rec.alignment = cpu_to_be32(vol->alignment);
vtbl_rec.data_pad = cpu_to_be32(vol->data_pad);
vtbl_rec.name_len = cpu_to_be16(vol->name_len);
if (vol->vol_type == UBI_DYNAMIC_VOLUME)
vtbl_rec.vol_type = UBI_VID_DYNAMIC;
else
vtbl_rec.vol_type = UBI_VID_STATIC;
if (vol->skip_check)
vtbl_rec.flags |= UBI_VTBL_SKIP_CRC_CHECK_FLG;
memcpy(vtbl_rec.name, vol->name, vol->name_len);
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
goto out_sysfs;
ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
self_check_volumes(ubi);
return err;
out_sysfs:
/*
* We have registered our device, we should not free the volume
* description object in this function in case of an error - it is
* freed by the release function.
*/
cdev_device_del(&vol->cdev, &vol->dev);
out_mapping:
spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = NULL;
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
out_acc:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
ubi->avail_pebs += vol->reserved_pebs;
out_unlock:
spin_unlock(&ubi->volumes_lock);
put_device(&vol->dev);
ubi_err(ubi, "cannot create volume %d, error %d", vol_id, err);
return err;
}
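/*
 * Usage sketch (hypothetical in-kernel caller): fill a &struct ubi_mkvol_req
 * and call ubi_create_volume() with @ubi->device_mutex held, mirroring what
 * the UBI_IOCMKVOL ioctl handler does. Sizes and the name are illustrative.
 *
 *	struct ubi_mkvol_req req = {
 *		.vol_id = UBI_VOL_NUM_AUTO,
 *		.alignment = 1,
 *		.bytes = 1024 * 1024,
 *		.vol_type = UBI_DYNAMIC_VOLUME,
 *		.name_len = 4,
 *		.name = "test",
 *	};
 *
 *	mutex_lock(&ubi->device_mutex);
 *	err = ubi_create_volume(ubi, &req);	// on success req.vol_id holds the ID
 *	mutex_unlock(&ubi->device_mutex);
 */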
/**
* ubi_remove_volume - remove volume.
* @desc: volume descriptor
* @no_vtbl: do not change volume table if not zero
*
* This function removes volume described by @desc. The volume has to be opened
* in "exclusive" mode. Returns zero in case of success and a negative error
* code in case of failure. The caller has to have the @ubi->device_mutex
* locked.
*/
int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs;
dbg_gen("remove device %d, volume %d", ubi->ubi_num, vol_id);
ubi_assert(desc->mode == UBI_EXCLUSIVE);
ubi_assert(vol == ubi->volumes[vol_id]);
if (ubi->ro_mode)
return -EROFS;
spin_lock(&ubi->volumes_lock);
if (vol->ref_count > 1) {
/*
* The volume is busy, probably someone is reading one of its
* sysfs files.
*/
err = -EBUSY;
goto out_unlock;
}
ubi->volumes[vol_id] = NULL;
spin_unlock(&ubi->volumes_lock);
if (!no_vtbl) {
err = ubi_change_vtbl_record(ubi, vol_id, NULL);
if (err)
goto out_err;
}
for (i = 0; i < vol->reserved_pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol, i);
if (err)
goto out_err;
}
cdev_device_del(&vol->cdev, &vol->dev);
put_device(&vol->dev);
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= reserved_pebs;
ubi->avail_pebs += reserved_pebs;
ubi_update_reserved(ubi);
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED);
if (!no_vtbl)
self_check_volumes(ubi);
return 0;
out_err:
ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err);
spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = vol;
out_unlock:
spin_unlock(&ubi->volumes_lock);
return err;
}
/**
* ubi_resize_volume - re-size volume.
* @desc: volume descriptor
* @reserved_pebs: new size in physical eraseblocks
*
* This function re-sizes the volume and returns zero in case of success, and a
* negative error code in case of failure. The caller has to have the
* @ubi->device_mutex locked.
*/
int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
{
int i, err, pebs;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
struct ubi_vtbl_record vtbl_rec;
struct ubi_eba_table *new_eba_tbl = NULL;
int vol_id = vol->vol_id;
if (ubi->ro_mode)
return -EROFS;
dbg_gen("re-size device %d, volume %d to from %d to %d PEBs",
ubi->ubi_num, vol_id, vol->reserved_pebs, reserved_pebs);
if (vol->vol_type == UBI_STATIC_VOLUME &&
reserved_pebs < vol->used_ebs) {
ubi_err(ubi, "too small size %d, %d LEBs contain data",
reserved_pebs, vol->used_ebs);
return -EINVAL;
}
/* If the size is the same, we have nothing to do */
if (reserved_pebs == vol->reserved_pebs)
return 0;
new_eba_tbl = ubi_eba_create_table(vol, reserved_pebs);
if (IS_ERR(new_eba_tbl))
return PTR_ERR(new_eba_tbl);
spin_lock(&ubi->volumes_lock);
if (vol->ref_count > 1) {
spin_unlock(&ubi->volumes_lock);
err = -EBUSY;
goto out_free;
}
spin_unlock(&ubi->volumes_lock);
/* Reserve physical eraseblocks */
pebs = reserved_pebs - vol->reserved_pebs;
if (pebs > 0) {
spin_lock(&ubi->volumes_lock);
if (pebs > ubi->avail_pebs) {
ubi_err(ubi, "not enough PEBs: requested %d, available %d",
pebs, ubi->avail_pebs);
if (ubi->corr_peb_count)
ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
spin_unlock(&ubi->volumes_lock);
err = -ENOSPC;
goto out_free;
}
ubi->avail_pebs -= pebs;
ubi->rsvd_pebs += pebs;
ubi_eba_copy_table(vol, new_eba_tbl, vol->reserved_pebs);
ubi_eba_replace_table(vol, new_eba_tbl);
spin_unlock(&ubi->volumes_lock);
}
if (pebs < 0) {
for (i = 0; i < -pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
if (err)
goto out_free;
}
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs += pebs;
ubi->avail_pebs -= pebs;
ubi_update_reserved(ubi);
ubi_eba_copy_table(vol, new_eba_tbl, reserved_pebs);
ubi_eba_replace_table(vol, new_eba_tbl);
spin_unlock(&ubi->volumes_lock);
}
/*
* When we shrink a volume we have to flush all pending (erase) work.
* Otherwise it can happen that upon next attach UBI finds a LEB with
* lnum > highest_lnum and refuses to attach.
*/
if (pebs < 0) {
err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
if (err)
goto out_acc;
}
/* Change volume table record */
vtbl_rec = ubi->vtbl[vol_id];
vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
goto out_acc;
vol->reserved_pebs = reserved_pebs;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = reserved_pebs;
vol->last_eb_bytes = vol->usable_leb_size;
vol->used_bytes =
(long long)vol->used_ebs * vol->usable_leb_size;
}
ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
self_check_volumes(ubi);
return err;
out_acc:
if (pebs > 0) {
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= pebs;
ubi->avail_pebs += pebs;
spin_unlock(&ubi->volumes_lock);
}
return err;
out_free:
ubi_eba_destroy_table(new_eba_tbl);
return err;
}
/**
* ubi_rename_volumes - re-name UBI volumes.
* @ubi: UBI device description object
* @rename_list: list of &struct ubi_rename_entry objects
*
* This function re-names or removes volumes specified in the re-name list.
* Returns zero in case of success and a negative error code in case of
* failure.
*/
int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
{
int err;
struct ubi_rename_entry *re;
err = ubi_vtbl_rename_volumes(ubi, rename_list);
if (err)
return err;
list_for_each_entry(re, rename_list, list) {
if (re->remove) {
err = ubi_remove_volume(re->desc, 1);
if (err)
break;
} else {
struct ubi_volume *vol = re->desc->vol;
spin_lock(&ubi->volumes_lock);
vol->name_len = re->new_name_len;
memcpy(vol->name, re->new_name, re->new_name_len + 1);
spin_unlock(&ubi->volumes_lock);
ubi_volume_notify(ubi, vol, UBI_VOLUME_RENAMED);
}
}
if (!err)
self_check_volumes(ubi);
return err;
}
/**
* ubi_add_volume - add volume.
* @ubi: UBI device description object
* @vol: volume description object
*
* This function adds an existing volume and initializes all its data
* structures. Returns zero in case of success and a negative error code in
* case of failure.
*/
int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
{
int err, vol_id = vol->vol_id;
dev_t dev;
dbg_gen("add volume %d", vol_id);
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
err = cdev_add(&vol->cdev, dev, 1);
if (err) {
ubi_err(ubi, "cannot add character device for volume %d, error %d",
vol_id, err);
vol_release(&vol->dev);
return err;
}
vol->dev.release = vol_release;
vol->dev.parent = &ubi->dev;
vol->dev.devt = dev;
vol->dev.class = &ubi_class;
vol->dev.groups = volume_dev_groups;
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
if (err) {
cdev_del(&vol->cdev);
put_device(&vol->dev);
return err;
}
self_check_volumes(ubi);
return err;
}
/**
* ubi_free_volume - free volume.
* @ubi: UBI device description object
* @vol: volume description object
*
* This function frees all resources for volume @vol but does not remove it.
* Used only when the UBI device is detached.
*/
void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
{
dbg_gen("free volume %d", vol->vol_id);
ubi->volumes[vol->vol_id] = NULL;
cdev_del(&vol->cdev);
device_unregister(&vol->dev);
}
/**
* self_check_volume - check volume information.
* @ubi: UBI device description object
* @vol_id: volume ID
*
* Returns zero if volume is all right and a negative error code if not.
*/
static int self_check_volume(struct ubi_device *ubi, int vol_id)
{
int idx = vol_id2idx(ubi, vol_id);
int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
const struct ubi_volume *vol;
long long n;
const char *name;
spin_lock(&ubi->volumes_lock);
reserved_pebs = be32_to_cpu(ubi->vtbl[vol_id].reserved_pebs);
vol = ubi->volumes[idx];
if (!vol) {
if (reserved_pebs) {
ubi_err(ubi, "no volume info, but volume exists");
goto fail;
}
spin_unlock(&ubi->volumes_lock);
return 0;
}
if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 ||
vol->name_len < 0) {
ubi_err(ubi, "negative values");
goto fail;
}
if (vol->alignment > ubi->leb_size || vol->alignment == 0) {
ubi_err(ubi, "bad alignment");
goto fail;
}
n = vol->alignment & (ubi->min_io_size - 1);
if (vol->alignment != 1 && n) {
ubi_err(ubi, "alignment is not multiple of min I/O unit");
goto fail;
}
n = ubi->leb_size % vol->alignment;
if (vol->data_pad != n) {
ubi_err(ubi, "bad data_pad, has to be %lld", n);
goto fail;
}
if (vol->vol_type != UBI_DYNAMIC_VOLUME &&
vol->vol_type != UBI_STATIC_VOLUME) {
ubi_err(ubi, "bad vol_type");
goto fail;
}
if (vol->upd_marker && vol->corrupted) {
ubi_err(ubi, "update marker and corrupted simultaneously");
goto fail;
}
if (vol->reserved_pebs > ubi->good_peb_count) {
ubi_err(ubi, "too large reserved_pebs");
goto fail;
}
n = ubi->leb_size - vol->data_pad;
if (vol->usable_leb_size != ubi->leb_size - vol->data_pad) {
ubi_err(ubi, "bad usable_leb_size, has to be %lld", n);
goto fail;
}
if (vol->name_len > UBI_VOL_NAME_MAX) {
ubi_err(ubi, "too long volume name, max is %d",
UBI_VOL_NAME_MAX);
goto fail;
}
n = strnlen(vol->name, vol->name_len + 1);
if (n != vol->name_len) {
ubi_err(ubi, "bad name_len %lld", n);
goto fail;
}
n = (long long)vol->used_ebs * vol->usable_leb_size;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
if (vol->corrupted) {
ubi_err(ubi, "corrupted dynamic volume");
goto fail;
}
if (vol->used_ebs != vol->reserved_pebs) {
ubi_err(ubi, "bad used_ebs");
goto fail;
}
if (vol->last_eb_bytes != vol->usable_leb_size) {
ubi_err(ubi, "bad last_eb_bytes");
goto fail;
}
if (vol->used_bytes != n) {
ubi_err(ubi, "bad used_bytes");
goto fail;
}
if (vol->skip_check) {
ubi_err(ubi, "bad skip_check");
goto fail;
}
} else {
if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) {
ubi_err(ubi, "bad used_ebs");
goto fail;
}
if (vol->last_eb_bytes < 0 ||
vol->last_eb_bytes > vol->usable_leb_size) {
ubi_err(ubi, "bad last_eb_bytes");
goto fail;
}
if (vol->used_bytes < 0 || vol->used_bytes > n ||
vol->used_bytes < n - vol->usable_leb_size) {
ubi_err(ubi, "bad used_bytes");
goto fail;
}
}
alignment = be32_to_cpu(ubi->vtbl[vol_id].alignment);
data_pad = be32_to_cpu(ubi->vtbl[vol_id].data_pad);
name_len = be16_to_cpu(ubi->vtbl[vol_id].name_len);
upd_marker = ubi->vtbl[vol_id].upd_marker;
name = &ubi->vtbl[vol_id].name[0];
if (ubi->vtbl[vol_id].vol_type == UBI_VID_DYNAMIC)
vol_type = UBI_DYNAMIC_VOLUME;
else
vol_type = UBI_STATIC_VOLUME;
if (alignment != vol->alignment || data_pad != vol->data_pad ||
upd_marker != vol->upd_marker || vol_type != vol->vol_type ||
name_len != vol->name_len || strncmp(name, vol->name, name_len)) {
ubi_err(ubi, "volume info is different");
goto fail;
}
spin_unlock(&ubi->volumes_lock);
return 0;
fail:
ubi_err(ubi, "self-check failed for volume %d", vol_id);
if (vol)
ubi_dump_vol_info(vol);
ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
dump_stack();
spin_unlock(&ubi->volumes_lock);
return -EINVAL;
}
/**
* self_check_volumes - check information about all volumes.
* @ubi: UBI device description object
*
* Returns zero if volumes are all right and a negative error code if not.
*/
static int self_check_volumes(struct ubi_device *ubi)
{
int i, err = 0;
if (!ubi_dbg_chk_gen(ubi))
return 0;
for (i = 0; i < ubi->vtbl_slots; i++) {
err = self_check_volume(ubi, i);
if (err)
break;
}
return err;
}
| linux-master | drivers/mtd/ubi/vmt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) International Business Machines Corp., 2006
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/* Here we keep miscellaneous functions which are used all over the UBI code */
#include "ubi.h"
/**
* ubi_calc_data_len - calculate how much real data is stored in a buffer.
* @ubi: UBI device description object
* @buf: a buffer with the contents of the physical eraseblock
* @length: the buffer length
*
 * This function calculates how much "real data" is stored in @buf and returns
* the length. Continuous 0xFF bytes at the end of the buffer are not
* considered as "real data".
*/
int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
int length)
{
int i;
ubi_assert(!(length & (ubi->min_io_size - 1)));
for (i = length - 1; i >= 0; i--)
if (((const uint8_t *)buf)[i] != 0xFF)
break;
/* The resulting length must be aligned to the minimum flash I/O size */
length = ALIGN(i + 1, ubi->min_io_size);
return length;
}
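/*
 * Worked example: with @ubi->min_io_size = 512 and a 2048-byte @buf whose
 * last non-0xFF byte sits at offset 1000, the loop stops at i = 1000 and the
 * function returns ALIGN(1001, 512) = 1024. An all-0xFF buffer yields
 * ALIGN(0, 512) = 0.
 */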
/**
* ubi_check_volume - check the contents of a static volume.
* @ubi: UBI device description object
* @vol_id: ID of the volume to check
*
* This function checks if static volume @vol_id is corrupted by fully reading
* it and checking data CRC. This function returns %0 if the volume is not
* corrupted, %1 if it is corrupted and a negative error code in case of
* failure. Dynamic volumes are not checked and zero is returned immediately.
*/
int ubi_check_volume(struct ubi_device *ubi, int vol_id)
{
void *buf;
int err = 0, i;
struct ubi_volume *vol = ubi->volumes[vol_id];
if (vol->vol_type != UBI_STATIC_VOLUME)
return 0;
buf = vmalloc(vol->usable_leb_size);
if (!buf)
return -ENOMEM;
for (i = 0; i < vol->used_ebs; i++) {
int size;
cond_resched();
if (i == vol->used_ebs - 1)
size = vol->last_eb_bytes;
else
size = vol->usable_leb_size;
err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
if (err) {
if (mtd_is_eccerr(err))
err = 1;
break;
}
}
vfree(buf);
return err;
}
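/*
 * Usage sketch (mirrors the volume-update path in cdev.c): the three-way
 * return value is typically consumed like this.
 *
 *	err = ubi_check_volume(ubi, vol->vol_id);
 *	if (err < 0)
 *		return err;		// I/O error while reading
 *	if (err)
 *		vol->corrupted = 1;	// data CRC mismatch was found
 */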
/**
* ubi_update_reserved - update bad eraseblock handling accounting data.
* @ubi: UBI device description object
*
 * This function calculates the gap between the current number of PEBs reserved for
* bad eraseblock handling and the required level of PEBs that must be
* reserved, and if necessary, reserves more PEBs to fill that gap, according
* to availability. Should be called with ubi->volumes_lock held.
*/
void ubi_update_reserved(struct ubi_device *ubi)
{
int need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
if (need <= 0 || ubi->avail_pebs == 0)
return;
need = min_t(int, need, ubi->avail_pebs);
ubi->avail_pebs -= need;
ubi->rsvd_pebs += need;
ubi->beb_rsvd_pebs += need;
ubi_msg(ubi, "reserved more %d PEBs for bad PEB handling", need);
}
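/*
 * Worked example: if @ubi->beb_rsvd_level is 20, @ubi->beb_rsvd_pebs is 17
 * and only 2 PEBs are available, then need = min(20 - 17, 2) = 2, so two
 * PEBs move from the available pool into the bad-PEB reserve and a gap of
 * one PEB remains.
 */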
/**
* ubi_calculate_reserved - calculate how many PEBs must be reserved for bad
* eraseblock handling.
* @ubi: UBI device description object
*/
void ubi_calculate_reserved(struct ubi_device *ubi)
{
/*
* Calculate the actual number of PEBs currently needed to be reserved
* for future bad eraseblock handling.
*/
ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
if (ubi->beb_rsvd_level < 0) {
ubi->beb_rsvd_level = 0;
ubi_warn(ubi, "number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
ubi->bad_peb_count, ubi->bad_peb_limit);
}
}
/**
* ubi_check_pattern - check if buffer contains only a certain byte pattern.
* @buf: buffer to check
* @patt: the pattern to check
* @size: buffer size in bytes
*
 * This function returns %1 if there are only @patt bytes in @buf, and %0 if
* something else was also found.
*/
int ubi_check_pattern(const void *buf, uint8_t patt, int size)
{
int i;
for (i = 0; i < size; i++)
if (((const uint8_t *)buf)[i] != patt)
return 0;
return 1;
}
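/*
 * Usage sketch (hypothetical caller): verify that a freshly erased page
 * really contains only 0xFF bytes before treating it as empty.
 *
 *	if (!ubi_check_pattern(buf, 0xFF, ubi->min_io_size))
 *		pr_debug("page is not erased\n");
 */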
/* Normal UBI messages */
void ubi_msg(const struct ubi_device *ubi, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
pr_notice(UBI_NAME_STR "%d: %pV\n", ubi->ubi_num, &vaf);
va_end(args);
}
/* UBI warning messages */
void ubi_warn(const struct ubi_device *ubi, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
pr_warn(UBI_NAME_STR "%d warning: %ps: %pV\n",
ubi->ubi_num, __builtin_return_address(0), &vaf);
va_end(args);
}
/* UBI error messages */
void ubi_err(const struct ubi_device *ubi, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
pr_err(UBI_NAME_STR "%d error: %ps: %pV\n",
ubi->ubi_num, __builtin_return_address(0), &vaf);
va_end(args);
}
| linux-master | drivers/mtd/ubi/misc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) International Business Machines Corp., 2006
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/*
 * This file includes the implementation of UBI character device operations.
*
* There are two kinds of character devices in UBI: UBI character devices and
* UBI volume character devices. UBI character devices allow users to
* manipulate whole volumes: create, remove, and re-size them. Volume character
* devices provide volume I/O capabilities.
*
* Major and minor numbers are assigned dynamically to both UBI and volume
* character devices.
*
 * Well, there is a third kind of character device - the UBI control
 * character device, which allows one to manipulate UBI devices - create and
* delete them. In other words, it is used for attaching and detaching MTD
* devices.
*/
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <linux/capability.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/math64.h>
#include <mtd/ubi-user.h>
#include "ubi.h"
/**
* get_exclusive - get exclusive access to an UBI volume.
* @desc: volume descriptor
*
* This function changes UBI volume open mode to "exclusive". Returns previous
* mode value (positive integer) in case of success and a negative error code
* in case of failure.
*/
static int get_exclusive(struct ubi_volume_desc *desc)
{
int users, err;
struct ubi_volume *vol = desc->vol;
spin_lock(&vol->ubi->volumes_lock);
users = vol->readers + vol->writers + vol->exclusive + vol->metaonly;
ubi_assert(users > 0);
if (users > 1) {
ubi_err(vol->ubi, "%d users for volume %d", users, vol->vol_id);
err = -EBUSY;
} else {
vol->readers = vol->writers = vol->metaonly = 0;
vol->exclusive = 1;
err = desc->mode;
desc->mode = UBI_EXCLUSIVE;
}
spin_unlock(&vol->ubi->volumes_lock);
return err;
}
/**
* revoke_exclusive - revoke exclusive mode.
* @desc: volume descriptor
* @mode: new mode to switch to
*/
static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
{
struct ubi_volume *vol = desc->vol;
spin_lock(&vol->ubi->volumes_lock);
ubi_assert(vol->readers == 0 && vol->writers == 0 && vol->metaonly == 0);
ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE);
vol->exclusive = 0;
if (mode == UBI_READONLY)
vol->readers = 1;
else if (mode == UBI_READWRITE)
vol->writers = 1;
else if (mode == UBI_METAONLY)
vol->metaonly = 1;
else
vol->exclusive = 1;
spin_unlock(&vol->ubi->volumes_lock);
desc->mode = mode;
}
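/*
 * Pairing sketch: the update ioctls below take exclusive access with
 * get_exclusive() and hand it back with revoke_exclusive() once the
 * operation completes, roughly:
 *
 *	err = get_exclusive(desc);
 *	if (err < 0)
 *		return err;
 *	// ... start the update or LEB change ...
 *	revoke_exclusive(desc, UBI_READWRITE);
 */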
static int vol_cdev_open(struct inode *inode, struct file *file)
{
struct ubi_volume_desc *desc;
int vol_id = iminor(inode) - 1, mode, ubi_num;
ubi_num = ubi_major2num(imajor(inode));
if (ubi_num < 0)
return ubi_num;
if (file->f_mode & FMODE_WRITE)
mode = UBI_READWRITE;
else
mode = UBI_READONLY;
dbg_gen("open device %d, volume %d, mode %d",
ubi_num, vol_id, mode);
desc = ubi_open_volume(ubi_num, vol_id, mode);
if (IS_ERR(desc))
return PTR_ERR(desc);
file->private_data = desc;
return 0;
}
static int vol_cdev_release(struct inode *inode, struct file *file)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
dbg_gen("release device %d, volume %d, mode %d",
vol->ubi->ubi_num, vol->vol_id, desc->mode);
if (vol->updating) {
ubi_warn(vol->ubi, "update of volume %d not finished, volume is damaged",
vol->vol_id);
ubi_assert(!vol->changing_leb);
vol->updating = 0;
vfree(vol->upd_buf);
} else if (vol->changing_leb) {
dbg_gen("only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel",
vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num,
vol->vol_id);
vol->changing_leb = 0;
vfree(vol->upd_buf);
}
ubi_close_volume(desc);
return 0;
}
static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
if (vol->updating) {
/* Update is in progress, seeking is prohibited */
ubi_err(vol->ubi, "updating");
return -EBUSY;
}
return fixed_size_llseek(file, offset, origin, vol->used_bytes);
}
static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_device *ubi = desc->vol->ubi;
struct inode *inode = file_inode(file);
int err;
inode_lock(inode);
err = ubi_sync(ubi->ubi_num);
inode_unlock(inode);
return err;
}
static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
loff_t *offp)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int err, lnum, off, len, tbuf_size;
size_t count_save = count;
void *tbuf;
dbg_gen("read %zd bytes from offset %lld of volume %d",
count, *offp, vol->vol_id);
if (vol->updating) {
ubi_err(vol->ubi, "updating");
return -EBUSY;
}
if (vol->upd_marker) {
ubi_err(vol->ubi, "damaged volume, update marker is set");
return -EBADF;
}
if (*offp == vol->used_bytes || count == 0)
return 0;
if (vol->corrupted)
dbg_gen("read from corrupted volume %d", vol->vol_id);
if (*offp + count > vol->used_bytes)
count_save = count = vol->used_bytes - *offp;
tbuf_size = vol->usable_leb_size;
if (count < tbuf_size)
tbuf_size = ALIGN(count, ubi->min_io_size);
tbuf = vmalloc(tbuf_size);
if (!tbuf)
return -ENOMEM;
len = count > tbuf_size ? tbuf_size : count;
lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
do {
cond_resched();
if (off + len >= vol->usable_leb_size)
len = vol->usable_leb_size - off;
err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
if (err)
break;
off += len;
if (off == vol->usable_leb_size) {
lnum += 1;
off -= vol->usable_leb_size;
}
count -= len;
*offp += len;
err = copy_to_user(buf, tbuf, len);
if (err) {
err = -EFAULT;
break;
}
buf += len;
len = count > tbuf_size ? tbuf_size : count;
} while (count);
vfree(tbuf);
return err ? err : count_save - count;
}
/*
 * This function allows one to write directly to dynamic UBI volumes, without
* issuing the volume update operation.
*/
static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
size_t count, loff_t *offp)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
int lnum, off, len, tbuf_size, err = 0;
size_t count_save = count;
char *tbuf;
if (!vol->direct_writes)
return -EPERM;
dbg_gen("requested: write %zd bytes to offset %lld of volume %u",
count, *offp, vol->vol_id);
if (vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
if (off & (ubi->min_io_size - 1)) {
ubi_err(ubi, "unaligned position");
return -EINVAL;
}
if (*offp + count > vol->used_bytes)
count_save = count = vol->used_bytes - *offp;
/* We can write only in fractions of the minimum I/O unit */
if (count & (ubi->min_io_size - 1)) {
ubi_err(ubi, "unaligned write length");
return -EINVAL;
}
tbuf_size = vol->usable_leb_size;
if (count < tbuf_size)
tbuf_size = ALIGN(count, ubi->min_io_size);
tbuf = vmalloc(tbuf_size);
if (!tbuf)
return -ENOMEM;
len = count > tbuf_size ? tbuf_size : count;
while (count) {
cond_resched();
if (off + len >= vol->usable_leb_size)
len = vol->usable_leb_size - off;
err = copy_from_user(tbuf, buf, len);
if (err) {
err = -EFAULT;
break;
}
err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len);
if (err)
break;
off += len;
if (off == vol->usable_leb_size) {
lnum += 1;
off -= vol->usable_leb_size;
}
count -= len;
*offp += len;
buf += len;
len = count > tbuf_size ? tbuf_size : count;
}
vfree(tbuf);
return err ? err : count_save - count;
}
static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
size_t count, loff_t *offp)
{
int err = 0;
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
if (!vol->updating && !vol->changing_leb)
return vol_cdev_direct_write(file, buf, count, offp);
if (vol->updating)
err = ubi_more_update_data(ubi, vol, buf, count);
else
err = ubi_more_leb_change_data(ubi, vol, buf, count);
if (err < 0) {
ubi_err(ubi, "cannot accept more %zd bytes of data, error %d",
count, err);
return err;
}
if (err) {
/*
* The operation is finished, @err contains number of actually
* written bytes.
*/
count = err;
if (vol->changing_leb) {
revoke_exclusive(desc, UBI_READWRITE);
return count;
}
/*
* We voluntarily do not take into account the skip_check flag
* as we want to make sure what we wrote was correctly written.
*/
err = ubi_check_volume(ubi, vol->vol_id);
if (err < 0)
return err;
if (err) {
ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
vol->vol_id, ubi->ubi_num);
vol->corrupted = 1;
}
vol->checked = 1;
ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
revoke_exclusive(desc, UBI_READWRITE);
}
return count;
}
static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int err = 0;
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
void __user *argp = (void __user *)arg;
switch (cmd) {
/* Volume update command */
case UBI_IOCVOLUP:
{
int64_t bytes, rsvd_bytes;
if (!capable(CAP_SYS_RESOURCE)) {
err = -EPERM;
break;
}
err = copy_from_user(&bytes, argp, sizeof(int64_t));
if (err) {
err = -EFAULT;
break;
}
if (desc->mode == UBI_READONLY) {
err = -EROFS;
break;
}
rsvd_bytes = (long long)vol->reserved_pebs *
vol->usable_leb_size;
if (bytes < 0 || bytes > rsvd_bytes) {
err = -EINVAL;
break;
}
err = get_exclusive(desc);
if (err < 0)
break;
err = ubi_start_update(ubi, vol, bytes);
if (bytes == 0) {
ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
revoke_exclusive(desc, UBI_READWRITE);
}
break;
}
/* Atomic logical eraseblock change command */
case UBI_IOCEBCH:
{
struct ubi_leb_change_req req;
err = copy_from_user(&req, argp,
sizeof(struct ubi_leb_change_req));
if (err) {
err = -EFAULT;
break;
}
if (desc->mode == UBI_READONLY ||
vol->vol_type == UBI_STATIC_VOLUME) {
err = -EROFS;
break;
}
/* Validate the request */
err = -EINVAL;
if (!ubi_leb_valid(vol, req.lnum) ||
req.bytes < 0 || req.bytes > vol->usable_leb_size)
break;
err = get_exclusive(desc);
if (err < 0)
break;
err = ubi_start_leb_change(ubi, vol, &req);
if (req.bytes == 0)
revoke_exclusive(desc, UBI_READWRITE);
break;
}
/* Logical eraseblock erasure command */
case UBI_IOCEBER:
{
int32_t lnum;
err = get_user(lnum, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
if (desc->mode == UBI_READONLY ||
vol->vol_type == UBI_STATIC_VOLUME) {
err = -EROFS;
break;
}
if (!ubi_leb_valid(vol, lnum)) {
err = -EINVAL;
break;
}
dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
err = ubi_eba_unmap_leb(ubi, vol, lnum);
if (err)
break;
err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
break;
}
/* Logical eraseblock map command */
case UBI_IOCEBMAP:
{
struct ubi_map_req req;
err = copy_from_user(&req, argp, sizeof(struct ubi_map_req));
if (err) {
err = -EFAULT;
break;
}
err = ubi_leb_map(desc, req.lnum);
break;
}
/* Logical eraseblock un-map command */
case UBI_IOCEBUNMAP:
{
int32_t lnum;
err = get_user(lnum, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
err = ubi_leb_unmap(desc, lnum);
break;
}
/* Check if logical eraseblock is mapped command */
case UBI_IOCEBISMAP:
{
int32_t lnum;
err = get_user(lnum, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
err = ubi_is_mapped(desc, lnum);
break;
}
/* Set volume property command */
case UBI_IOCSETVOLPROP:
{
struct ubi_set_vol_prop_req req;
err = copy_from_user(&req, argp,
sizeof(struct ubi_set_vol_prop_req));
if (err) {
err = -EFAULT;
break;
}
switch (req.property) {
case UBI_VOL_PROP_DIRECT_WRITE:
mutex_lock(&ubi->device_mutex);
desc->vol->direct_writes = !!req.value;
mutex_unlock(&ubi->device_mutex);
break;
default:
err = -EINVAL;
break;
}
break;
}
/* Create a R/O block device on top of the UBI volume */
case UBI_IOCVOLCRBLK:
{
struct ubi_volume_info vi;
ubi_get_volume_info(desc, &vi);
err = ubiblock_create(&vi);
break;
}
/* Remove the R/O block device */
case UBI_IOCVOLRMBLK:
{
struct ubi_volume_info vi;
ubi_get_volume_info(desc, &vi);
err = ubiblock_remove(&vi);
break;
}
default:
err = -ENOTTY;
break;
}
return err;
}
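/*
 * Userspace sketch of the update protocol driven by UBI_IOCVOLUP (the
 * device path and size are illustrative): issue the ioctl with the total
 * image size, then write exactly that many bytes to the volume character
 * device.
 *
 *	int fd = open("/dev/ubi0_0", O_RDWR);
 *	int64_t bytes = image_size;		// hypothetical image size
 *
 *	ioctl(fd, UBI_IOCVOLUP, &bytes);
 *	write(fd, image, image_size);		// may also be split into chunks
 *	close(fd);
 */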
/**
* verify_mkvol_req - verify volume creation request.
* @ubi: UBI device description object
* @req: the request to check
*
 * This function returns zero if the request is correct, and %-EINVAL if not.
*/
static int verify_mkvol_req(const struct ubi_device *ubi,
const struct ubi_mkvol_req *req)
{
int n, err = -EINVAL;
if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 ||
req->name_len < 0)
goto bad;
if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) &&
req->vol_id != UBI_VOL_NUM_AUTO)
goto bad;
if (req->alignment == 0)
goto bad;
if (req->bytes == 0)
goto bad;
if (req->vol_type != UBI_DYNAMIC_VOLUME &&
req->vol_type != UBI_STATIC_VOLUME)
goto bad;
if (req->flags & ~UBI_VOL_VALID_FLGS)
goto bad;
if (req->flags & UBI_VOL_SKIP_CRC_CHECK_FLG &&
req->vol_type != UBI_STATIC_VOLUME)
goto bad;
if (req->alignment > ubi->leb_size)
goto bad;
n = req->alignment & (ubi->min_io_size - 1);
if (req->alignment != 1 && n)
goto bad;
if (!req->name[0] || !req->name_len)
goto bad;
if (req->name_len > UBI_VOL_NAME_MAX) {
err = -ENAMETOOLONG;
goto bad;
}
n = strnlen(req->name, req->name_len + 1);
if (n != req->name_len)
goto bad;
return 0;
bad:
ubi_err(ubi, "bad volume creation request");
ubi_dump_mkvol_req(req);
return err;
}
/**
* verify_rsvol_req - verify volume re-size request.
* @ubi: UBI device description object
* @req: the request to check
*
* This function returns zero if the request is correct, and %-EINVAL if not.
*/
static int verify_rsvol_req(const struct ubi_device *ubi,
const struct ubi_rsvol_req *req)
{
if (req->bytes <= 0)
return -EINVAL;
if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots)
return -EINVAL;
return 0;
}
/**
* rename_volumes - rename UBI volumes.
* @ubi: UBI device description object
* @req: volumes re-name request
*
* This is a helper function for the volume re-name IOCTL which validates the
 * request, opens the volume and calls the corresponding volume management
* function. Returns zero in case of success and a negative error code in case
* of failure.
*/
static int rename_volumes(struct ubi_device *ubi,
struct ubi_rnvol_req *req)
{
int i, n, err;
struct list_head rename_list;
struct ubi_rename_entry *re, *re1;
if (req->count < 0 || req->count > UBI_MAX_RNVOL)
return -EINVAL;
if (req->count == 0)
return 0;
/* Validate volume IDs and names in the request */
for (i = 0; i < req->count; i++) {
if (req->ents[i].vol_id < 0 ||
req->ents[i].vol_id >= ubi->vtbl_slots)
return -EINVAL;
if (req->ents[i].name_len < 0)
return -EINVAL;
if (req->ents[i].name_len > UBI_VOL_NAME_MAX)
return -ENAMETOOLONG;
req->ents[i].name[req->ents[i].name_len] = '\0';
n = strlen(req->ents[i].name);
if (n != req->ents[i].name_len)
return -EINVAL;
}
/* Make sure volume IDs and names are unique */
for (i = 0; i < req->count - 1; i++) {
for (n = i + 1; n < req->count; n++) {
if (req->ents[i].vol_id == req->ents[n].vol_id) {
ubi_err(ubi, "duplicated volume id %d",
req->ents[i].vol_id);
return -EINVAL;
}
if (!strcmp(req->ents[i].name, req->ents[n].name)) {
ubi_err(ubi, "duplicated volume name \"%s\"",
req->ents[i].name);
return -EINVAL;
}
}
}
/* Create the re-name list */
INIT_LIST_HEAD(&rename_list);
for (i = 0; i < req->count; i++) {
int vol_id = req->ents[i].vol_id;
int name_len = req->ents[i].name_len;
const char *name = req->ents[i].name;
re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
if (!re) {
err = -ENOMEM;
goto out_free;
}
re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_METAONLY);
if (IS_ERR(re->desc)) {
err = PTR_ERR(re->desc);
ubi_err(ubi, "cannot open volume %d, error %d",
vol_id, err);
kfree(re);
goto out_free;
}
/* Skip this re-naming if the name does not really change */
if (re->desc->vol->name_len == name_len &&
!memcmp(re->desc->vol->name, name, name_len)) {
ubi_close_volume(re->desc);
kfree(re);
continue;
}
re->new_name_len = name_len;
memcpy(re->new_name, name, name_len);
list_add_tail(&re->list, &rename_list);
dbg_gen("will rename volume %d from \"%s\" to \"%s\"",
vol_id, re->desc->vol->name, name);
}
if (list_empty(&rename_list))
return 0;
/* Find out the volumes which have to be removed */
list_for_each_entry(re, &rename_list, list) {
struct ubi_volume_desc *desc;
int no_remove_needed = 0;
/*
* Volume @re->vol_id is going to be re-named to
* @re->new_name, while its current name is @name. If a volume
* with name @re->new_name currently exists, it has to be
* removed, unless it is also re-named in the request (@req).
*/
list_for_each_entry(re1, &rename_list, list) {
if (re->new_name_len == re1->desc->vol->name_len &&
!memcmp(re->new_name, re1->desc->vol->name,
re1->desc->vol->name_len)) {
no_remove_needed = 1;
break;
}
}
if (no_remove_needed)
continue;
/*
* It seems we need to remove volume with name @re->new_name,
* if it exists.
*/
desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
UBI_EXCLUSIVE);
if (IS_ERR(desc)) {
err = PTR_ERR(desc);
if (err == -ENODEV)
/* Re-naming into a non-existing volume name */
continue;
/* The volume exists but busy, or an error occurred */
ubi_err(ubi, "cannot open volume \"%s\", error %d",
re->new_name, err);
goto out_free;
}
re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
if (!re1) {
err = -ENOMEM;
ubi_close_volume(desc);
goto out_free;
}
re1->remove = 1;
re1->desc = desc;
list_add(&re1->list, &rename_list);
dbg_gen("will remove volume %d, name \"%s\"",
re1->desc->vol->vol_id, re1->desc->vol->name);
}
mutex_lock(&ubi->device_mutex);
err = ubi_rename_volumes(ubi, &rename_list);
mutex_unlock(&ubi->device_mutex);
out_free:
list_for_each_entry_safe(re, re1, &rename_list, list) {
ubi_close_volume(re->desc);
list_del(&re->list);
kfree(re);
}
return err;
}
static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int err = 0;
struct ubi_device *ubi;
struct ubi_volume_desc *desc;
void __user *argp = (void __user *)arg;
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
ubi = ubi_get_by_major(imajor(file->f_mapping->host));
if (!ubi)
return -ENODEV;
switch (cmd) {
/* Create volume command */
case UBI_IOCMKVOL:
{
struct ubi_mkvol_req req;
dbg_gen("create volume");
err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
if (err) {
err = -EFAULT;
break;
}
err = verify_mkvol_req(ubi, &req);
if (err)
break;
mutex_lock(&ubi->device_mutex);
err = ubi_create_volume(ubi, &req);
mutex_unlock(&ubi->device_mutex);
if (err)
break;
err = put_user(req.vol_id, (__user int32_t *)argp);
if (err)
err = -EFAULT;
break;
}
/* Remove volume command */
case UBI_IOCRMVOL:
{
int vol_id;
dbg_gen("remove volume");
err = get_user(vol_id, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
if (IS_ERR(desc)) {
err = PTR_ERR(desc);
break;
}
mutex_lock(&ubi->device_mutex);
err = ubi_remove_volume(desc, 0);
mutex_unlock(&ubi->device_mutex);
/*
* The volume is deleted (unless an error occurred), and the
* 'struct ubi_volume' object will be freed when
* 'ubi_close_volume()' will call 'put_device()'.
*/
ubi_close_volume(desc);
break;
}
/* Re-size volume command */
case UBI_IOCRSVOL:
{
int pebs;
struct ubi_rsvol_req req;
dbg_gen("re-size volume");
err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
if (err) {
err = -EFAULT;
break;
}
err = verify_rsvol_req(ubi, &req);
if (err)
break;
desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE);
if (IS_ERR(desc)) {
err = PTR_ERR(desc);
break;
}
pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
desc->vol->usable_leb_size);
mutex_lock(&ubi->device_mutex);
err = ubi_resize_volume(desc, pebs);
mutex_unlock(&ubi->device_mutex);
ubi_close_volume(desc);
break;
}
/* Re-name volumes command */
case UBI_IOCRNVOL:
{
struct ubi_rnvol_req *req;
dbg_gen("re-name volumes");
req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
if (!req) {
err = -ENOMEM;
break;
}
err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req));
if (err) {
err = -EFAULT;
kfree(req);
break;
}
err = rename_volumes(ubi, req);
kfree(req);
break;
}
/* Check a specific PEB for bitflips and scrub it if needed */
case UBI_IOCRPEB:
{
int pnum;
err = get_user(pnum, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
err = ubi_bitflip_check(ubi, pnum, 0);
break;
}
/* Force scrubbing for a specific PEB */
case UBI_IOCSPEB:
{
int pnum;
err = get_user(pnum, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
err = ubi_bitflip_check(ubi, pnum, 1);
break;
}
default:
err = -ENOTTY;
break;
}
ubi_put_device(ubi);
return err;
}
static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int err = 0;
void __user *argp = (void __user *)arg;
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
switch (cmd) {
/* Attach an MTD device command */
case UBI_IOCATT:
{
struct ubi_attach_req req;
struct mtd_info *mtd;
dbg_gen("attach MTD device");
err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
if (err) {
err = -EFAULT;
break;
}
if (req.mtd_num < 0 ||
(req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
err = -EINVAL;
break;
}
mtd = get_mtd_device(NULL, req.mtd_num);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
break;
}
/*
* Note, further request verification is done by
* 'ubi_attach_mtd_dev()'.
*/
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
req.max_beb_per1024, !!req.disable_fm);
mutex_unlock(&ubi_devices_mutex);
if (err < 0)
put_mtd_device(mtd);
else
/* @err contains UBI device number */
err = put_user(err, (__user int32_t *)argp);
break;
}
/* Detach an MTD device command */
case UBI_IOCDET:
{
int ubi_num;
dbg_gen("detach MTD device");
err = get_user(ubi_num, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
}
mutex_lock(&ubi_devices_mutex);
err = ubi_detach_mtd_dev(ubi_num, 0);
mutex_unlock(&ubi_devices_mutex);
break;
}
default:
err = -ENOTTY;
break;
}
return err;
}
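/*
 * Userspace sketch for attaching an MTD device via UBI_IOCATT (paths are
 * illustrative; the remaining &struct ubi_attach_req fields keep their zero
 * defaults here). On success the handler stores the assigned device number
 * back into the request.
 *
 *	struct ubi_attach_req req = {
 *		.ubi_num = UBI_DEV_NUM_AUTO,
 *		.mtd_num = 0,
 *	};
 *	int fd = open("/dev/ubi_ctrl", O_RDWR);
 *
 *	if (!ioctl(fd, UBI_IOCATT, &req))
 *		printf("attached as ubi%d\n", req.ubi_num);
 *	close(fd);
 */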
/* UBI volume character device operations */
const struct file_operations ubi_vol_cdev_operations = {
.owner = THIS_MODULE,
.open = vol_cdev_open,
.release = vol_cdev_release,
.llseek = vol_cdev_llseek,
.read = vol_cdev_read,
.write = vol_cdev_write,
.fsync = vol_cdev_fsync,
.unlocked_ioctl = vol_cdev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
/* UBI character device operations */
const struct file_operations ubi_cdev_operations = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = ubi_cdev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
/* UBI control character device operations */
const struct file_operations ubi_ctrl_cdev_operations = {
.owner = THIS_MODULE,
.unlocked_ioctl = ctrl_cdev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.llseek = no_llseek,
};
| linux-master | drivers/mtd/ubi/cdev.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) International Business Machines Corp., 2006
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/*
* The UBI Eraseblock Association (EBA) sub-system.
*
 * This sub-system is responsible for I/O to/from logical eraseblocks.
*
* Although in this implementation the EBA table is fully kept and managed in
* RAM, which assumes poor scalability, it might be (partially) maintained on
* flash in future implementations.
*
* The EBA sub-system implements per-logical eraseblock locking. Before
* accessing a logical eraseblock it is locked for reading or writing. The
* per-logical eraseblock locking is implemented by means of the lock tree. The
* lock tree is an RB-tree which refers all the currently locked logical
* eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
* They are indexed by (@vol_id, @lnum) pairs.
*
* EBA also maintains the global sequence counter which is incremented each
* time a logical eraseblock is mapped to a physical eraseblock and it is
* stored in the volume identifier header. This means that each VID header has
 * a unique sequence number. The sequence number is only increased and we assume
* 64 bits is enough to never overflow.
*/
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include "ubi.h"
/* Number of physical eraseblocks reserved for atomic LEB change operation */
#define EBA_RESERVED_PEBS 1
/**
* struct ubi_eba_entry - structure encoding a single LEB -> PEB association
* @pnum: the physical eraseblock number attached to the LEB
*
* This structure is encoding a LEB -> PEB association. Note that the LEB
* number is not stored here, because it is the index used to access the
* entries table.
*/
struct ubi_eba_entry {
int pnum;
};
/**
* struct ubi_eba_table - LEB -> PEB association information
* @entries: the LEB to PEB mapping (one entry per LEB).
*
* This structure is private to the EBA logic and should be kept here.
* It is encoding the LEB to PEB association table, and is subject to
* changes.
*/
struct ubi_eba_table {
struct ubi_eba_entry *entries;
};
/**
* ubi_next_sqnum - get next sequence number.
* @ubi: UBI device description object
*
 * This function returns the next sequence number to use, which is just the
* global sequence counter value. It also increases the global sequence
* counter.
*/
unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
{
unsigned long long sqnum;
spin_lock(&ubi->ltree_lock);
sqnum = ubi->global_sqnum++;
spin_unlock(&ubi->ltree_lock);
return sqnum;
}
/**
* ubi_get_compat - get compatibility flags of a volume.
* @ubi: UBI device description object
* @vol_id: volume ID
*
* This function returns compatibility flags for an internal volume. User
* volumes have no compatibility flags, so %0 is returned.
*/
static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
{
if (vol_id == UBI_LAYOUT_VOLUME_ID)
return UBI_LAYOUT_VOLUME_COMPAT;
return 0;
}
/**
* ubi_eba_get_ldesc - get information about a LEB
* @vol: volume description object
* @lnum: logical eraseblock number
* @ldesc: the LEB descriptor to fill
*
* Used to query information about a specific LEB.
 * It currently only returns the physical position of the LEB, but will be
* extended to provide more information.
*/
void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum,
struct ubi_eba_leb_desc *ldesc)
{
ldesc->lnum = lnum;
ldesc->pnum = vol->eba_tbl->entries[lnum].pnum;
}
/**
* ubi_eba_create_table - allocate a new EBA table and initialize it with all
* LEBs unmapped
* @vol: volume containing the EBA table to copy
* @nentries: number of entries in the table
*
* Allocate a new EBA table and initialize it with all LEBs unmapped.
 * Returns a valid pointer if it succeeds, an ERR_PTR() otherwise.
*/
struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol,
int nentries)
{
struct ubi_eba_table *tbl;
int err = -ENOMEM;
int i;
tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
if (!tbl)
return ERR_PTR(-ENOMEM);
tbl->entries = kmalloc_array(nentries, sizeof(*tbl->entries),
GFP_KERNEL);
if (!tbl->entries)
goto err;
for (i = 0; i < nentries; i++)
tbl->entries[i].pnum = UBI_LEB_UNMAPPED;
return tbl;
err:
kfree(tbl);
return ERR_PTR(err);
}
/**
* ubi_eba_destroy_table - destroy an EBA table
* @tbl: the table to destroy
*
* Destroy an EBA table.
*/
void ubi_eba_destroy_table(struct ubi_eba_table *tbl)
{
if (!tbl)
return;
kfree(tbl->entries);
kfree(tbl);
}
/**
* ubi_eba_copy_table - copy the EBA table attached to vol into another table
* @vol: volume containing the EBA table to copy
* @dst: destination
* @nentries: number of entries to copy
*
* Copy the EBA table stored in vol into the one pointed by dst.
*/
void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst,
int nentries)
{
struct ubi_eba_table *src;
int i;
ubi_assert(dst && vol && vol->eba_tbl);
src = vol->eba_tbl;
for (i = 0; i < nentries; i++)
dst->entries[i].pnum = src->entries[i].pnum;
}
/**
* ubi_eba_replace_table - assign a new EBA table to a volume
* @vol: volume containing the EBA table to copy
* @tbl: new EBA table
*
* Assign a new EBA table to the volume and release the old one.
*/
void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl)
{
ubi_eba_destroy_table(vol->eba_tbl);
vol->eba_tbl = tbl;
}
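/*
 * Usage sketch: the resize path in vmt.c combines the three helpers above
 * in a create/copy/replace sequence so readers never observe a half-built
 * table (@new_nentries and @nentries_to_copy are hypothetical):
 *
 *	new_tbl = ubi_eba_create_table(vol, new_nentries);
 *	if (IS_ERR(new_tbl))
 *		return PTR_ERR(new_tbl);
 *
 *	spin_lock(&ubi->volumes_lock);
 *	ubi_eba_copy_table(vol, new_tbl, nentries_to_copy);
 *	ubi_eba_replace_table(vol, new_tbl);	// frees the old table
 *	spin_unlock(&ubi->volumes_lock);
 */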
/**
* ltree_lookup - look up the lock tree.
* @ubi: UBI device description object
* @vol_id: volume ID
* @lnum: logical eraseblock number
*
* This function returns a pointer to the corresponding &struct ubi_ltree_entry
* object if the logical eraseblock is locked and %NULL if it is not.
* @ubi->ltree_lock has to be locked.
*/
static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
int lnum)
{
struct rb_node *p;
p = ubi->ltree.rb_node;
while (p) {
struct ubi_ltree_entry *le;
le = rb_entry(p, struct ubi_ltree_entry, rb);
if (vol_id < le->vol_id)
p = p->rb_left;
else if (vol_id > le->vol_id)
p = p->rb_right;
else {
if (lnum < le->lnum)
p = p->rb_left;
else if (lnum > le->lnum)
p = p->rb_right;
else
return le;
}
}
return NULL;
}
/**
* ltree_add_entry - add new entry to the lock tree.
* @ubi: UBI device description object
* @vol_id: volume ID
* @lnum: logical eraseblock number
*
* This function adds new entry for logical eraseblock (@vol_id, @lnum) to the
* lock tree. If such entry is already there, its usage counter is increased.
* Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
* failed.
*/
static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
int vol_id, int lnum)
{
struct ubi_ltree_entry *le, *le1, *le_free;
le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
if (!le)
return ERR_PTR(-ENOMEM);
le->users = 0;
init_rwsem(&le->mutex);
le->vol_id = vol_id;
le->lnum = lnum;
spin_lock(&ubi->ltree_lock);
le1 = ltree_lookup(ubi, vol_id, lnum);
if (le1) {
/*
* This logical eraseblock is already locked. The newly
* allocated lock entry is not needed.
*/
le_free = le;
le = le1;
} else {
struct rb_node **p, *parent = NULL;
/*
* No lock entry, add the newly allocated one to the
* @ubi->ltree RB-tree.
*/
le_free = NULL;
p = &ubi->ltree.rb_node;
while (*p) {
parent = *p;
le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
if (vol_id < le1->vol_id)
p = &(*p)->rb_left;
else if (vol_id > le1->vol_id)
p = &(*p)->rb_right;
else {
ubi_assert(lnum != le1->lnum);
if (lnum < le1->lnum)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
}
rb_link_node(&le->rb, parent, p);
rb_insert_color(&le->rb, &ubi->ltree);
}
le->users += 1;
spin_unlock(&ubi->ltree_lock);
kfree(le_free);
return le;
}
/**
* leb_read_lock - lock logical eraseblock for reading.
* @ubi: UBI device description object
* @vol_id: volume ID
* @lnum: logical eraseblock number
*
* This function locks a logical eraseblock for reading. Returns zero in case
* of success and a negative error code in case of failure.
*/
static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
struct ubi_ltree_entry *le;
le = ltree_add_entry(ubi, vol_id, lnum);
if (IS_ERR(le))
return PTR_ERR(le);
down_read(&le->mutex);
return 0;
}
/**
* leb_read_unlock - unlock logical eraseblock.
* @ubi: UBI device description object
* @vol_id: volume ID
* @lnum: logical eraseblock number
*/
static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
struct ubi_ltree_entry *le;
spin_lock(&ubi->ltree_lock);
le = ltree_lookup(ubi, vol_id, lnum);
le->users -= 1;
ubi_assert(le->users >= 0);
up_read(&le->mutex);
if (le->users == 0) {
rb_erase(&le->rb, &ubi->ltree);
kfree(le);
}
spin_unlock(&ubi->ltree_lock);
}
/**
* leb_write_lock - lock logical eraseblock for writing.
* @ubi: UBI device description object
* @vol_id: volume ID
* @lnum: logical eraseblock number
*
* This function locks a logical eraseblock for writing. Returns zero in case
* of success and a negative error code in case of failure.
*/
static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
struct ubi_ltree_entry *le;
le = ltree_add_entry(ubi, vol_id, lnum);
if (IS_ERR(le))
return PTR_ERR(le);
down_write(&le->mutex);
return 0;
}
/**
* leb_write_trylock - try to lock logical eraseblock for writing.
* @ubi: UBI device description object
* @vol_id: volume ID
* @lnum: logical eraseblock number
*
* This function locks a logical eraseblock for writing if there is no
* contention and does nothing if there is contention. Returns %0 in case of
* success, %1 in case of contention, and a negative error code in case of
* failure.
*/
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
{
struct ubi_ltree_entry *le;
le = ltree_add_entry(ubi, vol_id, lnum);
if (IS_ERR(le))
return PTR_ERR(le);
if (down_write_trylock(&le->mutex))
return 0;
/* Contention, cancel */
spin_lock(&ubi->ltree_lock);
le->users -= 1;
ubi_assert(le->users >= 0);
if (le->users == 0) {
rb_erase(&le->rb, &ubi->ltree);
kfree(le);
}
spin_unlock(&ubi->ltree_lock);
return 1;
}
/**
* leb_write_unlock - unlock logical eraseblock.
* @ubi: UBI device description object
* @vol_id: volume ID
* @lnum: logical eraseblock number
*/
static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
struct ubi_ltree_entry *le;
spin_lock(&ubi->ltree_lock);
le = ltree_lookup(ubi, vol_id, lnum);
le->users -= 1;
ubi_assert(le->users >= 0);
up_write(&le->mutex);
if (le->users == 0) {
rb_erase(&le->rb, &ubi->ltree);
kfree(le);
}
spin_unlock(&ubi->ltree_lock);
}
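/*
 * The lock helpers above pair up as follows (sketch): a writer takes the
 * per-LEB lock, performs the I/O, then drops it; the ltree entry is freed
 * automatically once the last user is gone.
 *
 *	err = leb_write_lock(ubi, vol_id, lnum);
 *	if (err)
 *		return err;
 *	// ... write or remap the LEB ...
 *	leb_write_unlock(ubi, vol_id, lnum);
 */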
/**
* ubi_eba_is_mapped - check if a LEB is mapped.
* @vol: volume description object
* @lnum: logical eraseblock number
*
* This function returns true if the LEB is mapped, false otherwise.
*/
bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum)
{
return vol->eba_tbl->entries[lnum].pnum >= 0;
}
/**
* ubi_eba_unmap_leb - un-map logical eraseblock.
* @ubi: UBI device description object
* @vol: volume description object
* @lnum: logical eraseblock number
*
* This function un-maps logical eraseblock @lnum and schedules corresponding
* physical eraseblock for erasure. Returns zero in case of success and a
* negative error code in case of failure.
*/
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum)
{
int err, pnum, vol_id = vol->vol_id;
if (ubi->ro_mode)
return -EROFS;
err = leb_write_lock(ubi, vol_id, lnum);
if (err)
return err;
pnum = vol->eba_tbl->entries[lnum].pnum;
if (pnum < 0)
/* This logical eraseblock is already unmapped */
goto out_unlock;
dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
down_read(&ubi->fm_eba_sem);
vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
up_read(&ubi->fm_eba_sem);
err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
out_unlock:
leb_write_unlock(ubi, vol_id, lnum);
return err;
}
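/*
 * Example (hypothetical, for illustration only): un-mapping is how upper
 * layers "discard" a LEB. On a dynamic volume, a subsequent read of the
 * un-mapped LEB returns 0xFF bytes, as implemented in ubi_eba_read_leb():
 *
 *	err = ubi_eba_unmap_leb(ubi, vol, lnum);
 *	if (!err)
 *		err = ubi_eba_read_leb(ubi, vol, lnum, buf, 0, len, 0);
 *	(on success, buf now holds len bytes of 0xFF)
 */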
#ifdef CONFIG_MTD_UBI_FASTMAP
/**
* check_mapping - check and fixup a mapping
* @ubi: UBI device description object
* @vol: volume description object
* @lnum: logical eraseblock number
* @pnum: physical eraseblock number
*
 * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
 * operations; if such an operation is interrupted, the mapping still looks
 * good, but upon first read an ECC error is reported to the upper layer.
 * Normally this is fixed during the full scan at attach time, but for Fastmap
 * we have to deal with it while reading.
 * If the PEB behind a LEB shows this symptom, we change the mapping to
 * %UBI_LEB_UNMAPPED and schedule the PEB for erasure.
*
* Returns 0 on success, negative error code in case of failure.
*/
static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
int *pnum)
{
int err;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
if (!ubi->fast_attach)
return 0;
if (!vol->checkmap || test_bit(lnum, vol->checkmap))
return 0;
vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!vidb)
return -ENOMEM;
err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0);
if (err > 0 && err != UBI_IO_BITFLIPS) {
int torture = 0;
switch (err) {
case UBI_IO_FF:
case UBI_IO_FF_BITFLIPS:
case UBI_IO_BAD_HDR:
case UBI_IO_BAD_HDR_EBADMSG:
break;
default:
ubi_assert(0);
}
if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
torture = 1;
down_read(&ubi->fm_eba_sem);
vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
up_read(&ubi->fm_eba_sem);
ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);
*pnum = UBI_LEB_UNMAPPED;
} else if (err < 0) {
ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
*pnum, err);
goto out_free;
} else {
int found_vol_id, found_lnum;
ubi_assert(err == 0 || err == UBI_IO_BITFLIPS);
vid_hdr = ubi_get_vid_hdr(vidb);
found_vol_id = be32_to_cpu(vid_hdr->vol_id);
found_lnum = be32_to_cpu(vid_hdr->lnum);
if (found_lnum != lnum || found_vol_id != vol->vol_id) {
ubi_err(ubi, "EBA mismatch! PEB %i is LEB %i:%i instead of LEB %i:%i",
*pnum, found_vol_id, found_lnum, vol->vol_id, lnum);
ubi_ro_mode(ubi);
err = -EINVAL;
goto out_free;
}
}
set_bit(lnum, vol->checkmap);
err = 0;
out_free:
ubi_free_vid_buf(vidb);
return err;
}
#else
static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
int *pnum)
{
return 0;
}
#endif
/**
* ubi_eba_read_leb - read data.
* @ubi: UBI device description object
* @vol: volume description object
* @lnum: logical eraseblock number
* @buf: buffer to store the read data
* @offset: offset from where to read
* @len: how many bytes to read
* @check: data CRC check flag
*
* If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
* bytes. The @check flag only makes sense for static volumes and forces
* eraseblock data CRC checking.
*
 * In case of success this function returns zero. In case of a static volume,
 * if the data CRC mismatches, %-EBADMSG is returned. %-EBADMSG may also be
 * returned for any volume type if an ECC error was detected by the MTD device
 * driver. Other negative error codes may be returned in case of other errors.
*/
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
void *buf, int offset, int len, int check)
{
int err, pnum, scrub = 0, vol_id = vol->vol_id;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
uint32_t crc;
err = leb_read_lock(ubi, vol_id, lnum);
if (err)
return err;
pnum = vol->eba_tbl->entries[lnum].pnum;
if (pnum >= 0) {
err = check_mapping(ubi, vol, lnum, &pnum);
if (err < 0)
goto out_unlock;
}
if (pnum == UBI_LEB_UNMAPPED) {
/*
* The logical eraseblock is not mapped, fill the whole buffer
* with 0xFF bytes. The exception is static volumes for which
* it is an error to read unmapped logical eraseblocks.
*/
dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
len, offset, vol_id, lnum);
leb_read_unlock(ubi, vol_id, lnum);
ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
memset(buf, 0xFF, len);
return 0;
}
dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
len, offset, vol_id, lnum, pnum);
if (vol->vol_type == UBI_DYNAMIC_VOLUME)
check = 0;
retry:
if (check) {
vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!vidb) {
err = -ENOMEM;
goto out_unlock;
}
vid_hdr = ubi_get_vid_hdr(vidb);
err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
if (err && err != UBI_IO_BITFLIPS) {
if (err > 0) {
/*
* The header is either absent or corrupted.
* The former case means there is a bug -
* switch to read-only mode just in case.
* The latter case means a real corruption - we
* may try to recover data. FIXME: but this is
* not implemented.
*/
if (err == UBI_IO_BAD_HDR_EBADMSG ||
err == UBI_IO_BAD_HDR) {
ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
pnum, vol_id, lnum);
err = -EBADMSG;
} else {
/*
* Ending up here in the non-Fastmap case
* is a clear bug as the VID header had to
* be present at scan time to have it referenced.
* With fastmap the story is more complicated.
* Fastmap has the mapping info without the need
* of a full scan. So the LEB could have been
* unmapped, Fastmap cannot know this and keeps
* the LEB referenced.
* This is valid and works as the layer above UBI
* has to do bookkeeping about used/referenced
* LEBs in any case.
*/
if (ubi->fast_attach) {
err = -EBADMSG;
} else {
err = -EINVAL;
ubi_ro_mode(ubi);
}
}
}
goto out_free;
} else if (err == UBI_IO_BITFLIPS)
scrub = 1;
ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
crc = be32_to_cpu(vid_hdr->data_crc);
ubi_free_vid_buf(vidb);
}
err = ubi_io_read_data(ubi, buf, pnum, offset, len);
if (err) {
if (err == UBI_IO_BITFLIPS)
scrub = 1;
else if (mtd_is_eccerr(err)) {
if (vol->vol_type == UBI_DYNAMIC_VOLUME)
goto out_unlock;
scrub = 1;
if (!check) {
ubi_msg(ubi, "force data checking");
check = 1;
goto retry;
}
} else
goto out_unlock;
}
if (check) {
uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
if (crc1 != crc) {
ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
crc1, crc);
err = -EBADMSG;
goto out_unlock;
}
}
if (scrub)
err = ubi_wl_scrub_peb(ubi, pnum);
leb_read_unlock(ubi, vol_id, lnum);
return err;
out_free:
ubi_free_vid_buf(vidb);
out_unlock:
leb_read_unlock(ubi, vol_id, lnum);
return err;
}
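/*
 * Illustrative sketch (not part of the driver): a minimal caller reading one
 * whole LEB of a dynamic volume, where @check is ignored and forced to 0:
 *
 *	void *buf = kmalloc(vol->usable_leb_size, GFP_KERNEL);
 *
 *	if (buf) {
 *		err = ubi_eba_read_leb(ubi, vol, lnum, buf, 0,
 *				       vol->usable_leb_size, 0);
 *		...
 *		kfree(buf);
 *	}
 */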
/**
* ubi_eba_read_leb_sg - read data into a scatter gather list.
* @ubi: UBI device description object
* @vol: volume description object
* @lnum: logical eraseblock number
* @sgl: UBI scatter gather list to store the read data
* @offset: offset from where to read
* @len: how many bytes to read
* @check: data CRC check flag
*
 * This function works exactly like ubi_eba_read_leb(), but instead of
 * storing the read data in a buffer it writes into a UBI scatter gather
 * list.
*/
int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
struct ubi_sgl *sgl, int lnum, int offset, int len,
int check)
{
int to_read;
int ret;
struct scatterlist *sg;
for (;;) {
ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
sg = &sgl->sg[sgl->list_pos];
if (len < sg->length - sgl->page_pos)
to_read = len;
else
to_read = sg->length - sgl->page_pos;
ret = ubi_eba_read_leb(ubi, vol, lnum,
sg_virt(sg) + sgl->page_pos, offset,
to_read, check);
if (ret < 0)
return ret;
offset += to_read;
len -= to_read;
if (!len) {
sgl->page_pos += to_read;
if (sgl->page_pos == sg->length) {
sgl->list_pos++;
sgl->page_pos = 0;
}
break;
}
sgl->list_pos++;
sgl->page_pos = 0;
}
return ret;
}
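/*
 * Worked example (assumed numbers, for illustration): with two scatterlist
 * entries of 4096 bytes each, page_pos == 0 and a 6144-byte read, the loop
 * above calls ubi_eba_read_leb() twice - 4096 bytes into sg[0], then 2048
 * bytes into sg[1] - and leaves list_pos == 1, page_pos == 2048 so that a
 * follow-up call continues where this one stopped.
 */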
/**
* try_recover_peb - try to recover from write failure.
* @vol: volume description object
* @pnum: the physical eraseblock to recover
* @lnum: logical eraseblock number
* @buf: data which was not written because of the write failure
* @offset: offset of the failed write
* @len: how many bytes should have been written
* @vidb: VID buffer
* @retry: whether the caller should retry in case of failure
*
* This function is called in case of a write failure and moves all good data
* from the potentially bad physical eraseblock to a good physical eraseblock.
* This function also writes the data which was not written due to the failure.
* Returns 0 in case of success, and a negative error code in case of failure.
* In case of failure, the %retry parameter is set to false if this is a fatal
* error (retrying won't help), and true otherwise.
*/
static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum,
const void *buf, int offset, int len,
struct ubi_vid_io_buf *vidb, bool *retry)
{
struct ubi_device *ubi = vol->ubi;
struct ubi_vid_hdr *vid_hdr;
int new_pnum, err, vol_id = vol->vol_id, data_size;
uint32_t crc;
*retry = false;
new_pnum = ubi_wl_get_peb(ubi);
if (new_pnum < 0) {
err = new_pnum;
goto out_put;
}
ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
pnum, new_pnum);
err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
if (err && err != UBI_IO_BITFLIPS) {
if (err > 0)
err = -EIO;
goto out_put;
}
vid_hdr = ubi_get_vid_hdr(vidb);
ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
mutex_lock(&ubi->buf_mutex);
memset(ubi->peb_buf + offset, 0xFF, len);
/* Read everything before the area where the write failure happened */
if (offset > 0) {
err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
if (err && err != UBI_IO_BITFLIPS)
goto out_unlock;
}
*retry = true;
memcpy(ubi->peb_buf + offset, buf, len);
data_size = offset + len;
crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->copy_flag = 1;
vid_hdr->data_size = cpu_to_be32(data_size);
vid_hdr->data_crc = cpu_to_be32(crc);
err = ubi_io_write_vid_hdr(ubi, new_pnum, vidb);
if (err)
goto out_unlock;
err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
out_unlock:
mutex_unlock(&ubi->buf_mutex);
if (!err)
vol->eba_tbl->entries[lnum].pnum = new_pnum;
out_put:
up_read(&ubi->fm_eba_sem);
if (!err) {
ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
ubi_msg(ubi, "data was successfully recovered");
} else if (new_pnum >= 0) {
/*
* Bad luck? This physical eraseblock is bad too? Crud. Let's
* try to get another one.
*/
ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
}
return err;
}
/**
* recover_peb - recover from write failure.
* @ubi: UBI device description object
* @pnum: the physical eraseblock to recover
* @vol_id: volume ID
* @lnum: logical eraseblock number
* @buf: data which was not written because of the write failure
* @offset: offset of the failed write
* @len: how many bytes should have been written
*
* This function is called in case of a write failure and moves all good data
* from the potentially bad physical eraseblock to a good physical eraseblock.
* This function also writes the data which was not written due to the failure.
* Returns 0 in case of success, and a negative error code in case of failure.
* This function tries %UBI_IO_RETRIES before giving up.
*/
static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
const void *buf, int offset, int len)
{
int err, idx = vol_id2idx(ubi, vol_id), tries;
struct ubi_volume *vol = ubi->volumes[idx];
struct ubi_vid_io_buf *vidb;
vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!vidb)
return -ENOMEM;
for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
bool retry;
err = try_recover_peb(vol, pnum, lnum, buf, offset, len, vidb,
&retry);
if (!err || !retry)
break;
ubi_msg(ubi, "try again");
}
ubi_free_vid_buf(vidb);
return err;
}
/**
* try_write_vid_and_data - try to write VID header and data to a new PEB.
* @vol: volume description object
* @lnum: logical eraseblock number
* @vidb: the VID buffer to write
* @buf: buffer containing the data
* @offset: where to start writing data
* @len: how many bytes should be written
*
* This function tries to write VID header and data belonging to logical
* eraseblock @lnum of volume @vol to a new physical eraseblock. Returns zero
* in case of success and a negative error code in case of failure.
 * In case of error, it is possible that something was still written to the
 * flash media, but it may be garbage.
*/
static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
struct ubi_vid_io_buf *vidb, const void *buf,
int offset, int len)
{
struct ubi_device *ubi = vol->ubi;
int pnum, opnum, err, err2, vol_id = vol->vol_id;
pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
err = pnum;
goto out_put;
}
opnum = vol->eba_tbl->entries[lnum].pnum;
dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
len, offset, vol_id, lnum, pnum);
err = ubi_io_write_vid_hdr(ubi, pnum, vidb);
if (err) {
ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
vol_id, lnum, pnum);
goto out_put;
}
if (len) {
err = ubi_io_write_data(ubi, buf, pnum, offset, len);
if (err) {
ubi_warn(ubi,
"failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
len, offset, vol_id, lnum, pnum);
goto out_put;
}
}
vol->eba_tbl->entries[lnum].pnum = pnum;
out_put:
up_read(&ubi->fm_eba_sem);
if (err && pnum >= 0) {
err2 = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err2) {
ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
pnum, err2);
}
} else if (!err && opnum >= 0) {
err2 = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
if (err2) {
ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
opnum, err2);
}
}
return err;
}
/**
* ubi_eba_write_leb - write data to dynamic volume.
* @ubi: UBI device description object
* @vol: volume description object
* @lnum: logical eraseblock number
* @buf: the data to write
* @offset: offset within the logical eraseblock where to write
* @len: how many bytes to write
*
* This function writes data to logical eraseblock @lnum of a dynamic volume
* @vol. Returns zero in case of success and a negative error code in case
 * of failure. In case of error, it is possible that something was still
 * written to the flash media, but it may be garbage.
* This function retries %UBI_IO_RETRIES times before giving up.
*/
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
const void *buf, int offset, int len)
{
int err, pnum, tries, vol_id = vol->vol_id;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
if (ubi->ro_mode)
return -EROFS;
err = leb_write_lock(ubi, vol_id, lnum);
if (err)
return err;
pnum = vol->eba_tbl->entries[lnum].pnum;
if (pnum >= 0) {
err = check_mapping(ubi, vol, lnum, &pnum);
if (err < 0)
goto out;
}
if (pnum >= 0) {
dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
len, offset, vol_id, lnum, pnum);
err = ubi_io_write_data(ubi, buf, pnum, offset, len);
if (err) {
ubi_warn(ubi, "failed to write data to PEB %d", pnum);
if (err == -EIO && ubi->bad_allowed)
err = recover_peb(ubi, pnum, vol_id, lnum, buf,
offset, len);
}
goto out;
}
/*
* The logical eraseblock is not mapped. We have to get a free physical
* eraseblock and write the volume identifier header there first.
*/
vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!vidb) {
leb_write_unlock(ubi, vol_id, lnum);
return -ENOMEM;
}
vid_hdr = ubi_get_vid_hdr(vidb);
vid_hdr->vol_type = UBI_VID_DYNAMIC;
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len);
if (err != -EIO || !ubi->bad_allowed)
break;
/*
* Fortunately, this is the first write operation to this
* physical eraseblock, so just put it and request a new one.
* We assume that if this physical eraseblock went bad, the
* erase code will handle that.
*/
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg(ubi, "try another PEB");
}
ubi_free_vid_buf(vidb);
out:
if (err)
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
return err;
}
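/*
 * Illustrative sketch (not driver code): a typical dynamic-volume caller
 * writes min_io_size-aligned chunks at increasing offsets:
 *
 *	err = ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len);
 *
 * The first write to an un-mapped LEB also allocates a PEB and writes the
 * VID header, as implemented above; later writes to the same LEB go
 * straight to the mapped PEB.
 */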
/**
* ubi_eba_write_leb_st - write data to static volume.
* @ubi: UBI device description object
* @vol: volume description object
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
* @used_ebs: how many logical eraseblocks will this volume contain
*
* This function writes data to logical eraseblock @lnum of static volume
 * @vol. The @used_ebs argument should contain the total number of logical
 * eraseblocks in this static volume.
 *
 * When writing to the last logical eraseblock, the @len argument doesn't have
 * to be aligned to the minimal I/O unit size. Instead, it has to be equivalent
 * to the real data size, although the @buf buffer still has to contain the
 * aligned (padded) data. In all other cases, @len has to be aligned.
*
* It is prohibited to write more than once to logical eraseblocks of static
* volumes. This function returns zero in case of success and a negative error
* code in case of failure.
*/
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum, const void *buf, int len, int used_ebs)
{
int err, tries, data_size = len, vol_id = vol->vol_id;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
uint32_t crc;
if (ubi->ro_mode)
return -EROFS;
if (lnum == used_ebs - 1)
/* If this is the last LEB @len may be unaligned */
len = ALIGN(data_size, ubi->min_io_size);
else
ubi_assert(!(len & (ubi->min_io_size - 1)));
vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!vidb)
return -ENOMEM;
vid_hdr = ubi_get_vid_hdr(vidb);
err = leb_write_lock(ubi, vol_id, lnum);
if (err)
goto out;
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
crc = crc32(UBI_CRC32_INIT, buf, data_size);
vid_hdr->vol_type = UBI_VID_STATIC;
vid_hdr->data_size = cpu_to_be32(data_size);
vid_hdr->used_ebs = cpu_to_be32(used_ebs);
vid_hdr->data_crc = cpu_to_be32(crc);
ubi_assert(vol->eba_tbl->entries[lnum].pnum < 0);
for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
if (err != -EIO || !ubi->bad_allowed)
break;
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg(ubi, "try another PEB");
}
if (err)
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
out:
ubi_free_vid_buf(vidb);
return err;
}
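/*
 * Worked example (assumed numbers): with min_io_size == 2048, a static
 * volume whose last LEB carries 5000 data bytes is written with
 * len == 5000 for that LEB; the function pads the actual I/O to
 * ALIGN(5000, 2048) == 6144 bytes, so @buf must provide 6144 valid bytes.
 */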
/**
* ubi_eba_atomic_leb_change - change logical eraseblock atomically.
* @ubi: UBI device description object
* @vol: volume description object
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
*
 * This function changes the contents of a logical eraseblock atomically. @buf
 * has to contain the new logical eraseblock data, and @len - the length of the
 * data, which has to be aligned. This function guarantees that in case of an
 * unclean reboot the old contents are preserved. Returns zero in case of
 * success and a negative error code in case of failure.
*
* UBI reserves one LEB for the "atomic LEB change" operation, so only one
* LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
*/
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum, const void *buf, int len)
{
int err, tries, vol_id = vol->vol_id;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
uint32_t crc;
if (ubi->ro_mode)
return -EROFS;
if (len == 0) {
/*
* Special case when data length is zero. In this case the LEB
* has to be unmapped and mapped somewhere else.
*/
err = ubi_eba_unmap_leb(ubi, vol, lnum);
if (err)
return err;
return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
}
vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!vidb)
return -ENOMEM;
vid_hdr = ubi_get_vid_hdr(vidb);
mutex_lock(&ubi->alc_mutex);
err = leb_write_lock(ubi, vol_id, lnum);
if (err)
goto out_mutex;
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
crc = crc32(UBI_CRC32_INIT, buf, len);
vid_hdr->vol_type = UBI_VID_DYNAMIC;
vid_hdr->data_size = cpu_to_be32(len);
vid_hdr->copy_flag = 1;
vid_hdr->data_crc = cpu_to_be32(crc);
dbg_eba("change LEB %d:%d", vol_id, lnum);
for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
if (err != -EIO || !ubi->bad_allowed)
break;
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg(ubi, "try another PEB");
}
/*
	 * Either this flash device does not support bad eraseblocks, or
	 * something nasty and unexpected happened. Switch to read-only
	 * mode just in case.
*/
if (err)
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
out_mutex:
mutex_unlock(&ubi->alc_mutex);
ubi_free_vid_buf(vidb);
return err;
}
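/*
 * Usage sketch (illustrative): the atomic change makes in-place updates
 * power-cut safe - after an unclean reboot either the old or the new
 * contents are seen, never a mix:
 *
 *	err = ubi_eba_atomic_leb_change(ubi, vol, lnum, new_data, data_size);
 *
 * @data_size must be aligned, except that 0 degenerates to an unmap plus
 * re-map, as handled at the top of the function.
 */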
/**
* is_error_sane - check whether a read error is sane.
* @err: code of the error happened during reading
*
* This is a helper function for 'ubi_eba_copy_leb()' which is called when we
* cannot read data from the target PEB (an error @err happened). If the error
* code is sane, then we treat this error as non-fatal. Otherwise the error is
* fatal and UBI will be switched to R/O mode later.
*
* The idea is that we try not to switch to R/O mode if the read error is
 * something which suggests there was a real read problem. E.g., %-EIO. Or a
 * memory allocation failed (%-ENOMEM). Otherwise, it is safer to switch to R/O
* mode, simply because we do not know what happened at the MTD level, and we
* cannot handle this. E.g., the underlying driver may have become crazy, and
* it is safer to switch to R/O mode to preserve the data.
*
* And bear in mind, this is about reading from the target PEB, i.e. the PEB
* which we have just written.
*/
static int is_error_sane(int err)
{
if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
return 0;
return 1;
}
/**
* ubi_eba_copy_leb - copy logical eraseblock.
* @ubi: UBI device description object
* @from: physical eraseblock number from where to copy
* @to: physical eraseblock number where to copy
* @vidb: data structure from where the VID header is derived
*
 * This function copies logical eraseblock from physical eraseblock @from to
 * physical eraseblock @to. The VID header in @vidb may be changed by this
 * function. Returns:
* o %0 in case of success;
* o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
* o a negative error code in case of failure.
*/
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_io_buf *vidb)
{
int err, vol_id, lnum, data_size, aldata_size, idx;
struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
struct ubi_volume *vol;
uint32_t crc;
ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
vol_id = be32_to_cpu(vid_hdr->vol_id);
lnum = be32_to_cpu(vid_hdr->lnum);
dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
if (vid_hdr->vol_type == UBI_VID_STATIC) {
data_size = be32_to_cpu(vid_hdr->data_size);
aldata_size = ALIGN(data_size, ubi->min_io_size);
} else
data_size = aldata_size =
ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
idx = vol_id2idx(ubi, vol_id);
spin_lock(&ubi->volumes_lock);
/*
* Note, we may race with volume deletion, which means that the volume
* this logical eraseblock belongs to might be being deleted. Since the
* volume deletion un-maps all the volume's logical eraseblocks, it will
* be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
*/
vol = ubi->volumes[idx];
spin_unlock(&ubi->volumes_lock);
if (!vol) {
/* No need to do further work, cancel */
dbg_wl("volume %d is being removed, cancel", vol_id);
return MOVE_CANCEL_RACE;
}
/*
* We do not want anybody to write to this logical eraseblock while we
* are moving it, so lock it.
*
* Note, we are using non-waiting locking here, because we cannot sleep
* on the LEB, since it may cause deadlocks. Indeed, imagine a task is
* unmapping the LEB which is mapped to the PEB we are going to move
* (@from). This task locks the LEB and goes sleep in the
* 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
* holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
* LEB is already locked, we just do not move it and return
* %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
* we do not know the reasons of the contention - it may be just a
* normal I/O on this LEB, so we want to re-try.
*/
err = leb_write_trylock(ubi, vol_id, lnum);
if (err) {
dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
return MOVE_RETRY;
}
/*
* The LEB might have been put meanwhile, and the task which put it is
* probably waiting on @ubi->move_mutex. No need to continue the work,
* cancel it.
*/
if (vol->eba_tbl->entries[lnum].pnum != from) {
dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
vol_id, lnum, from, vol->eba_tbl->entries[lnum].pnum);
err = MOVE_CANCEL_RACE;
goto out_unlock_leb;
}
/*
* OK, now the LEB is locked and we can safely start moving it. Since
* this function utilizes the @ubi->peb_buf buffer which is shared
* with some other functions - we lock the buffer by taking the
* @ubi->buf_mutex.
*/
mutex_lock(&ubi->buf_mutex);
dbg_wl("read %d bytes of data", aldata_size);
err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
if (err && err != UBI_IO_BITFLIPS) {
ubi_warn(ubi, "error %d while reading data from PEB %d",
err, from);
err = MOVE_SOURCE_RD_ERR;
goto out_unlock_buf;
}
/*
* Now we have got to calculate how much data we have to copy. In
* case of a static volume it is fairly easy - the VID header contains
* the data size. In case of a dynamic volume it is more difficult - we
* have to read the contents, cut 0xFF bytes from the end and copy only
* the first part. We must do this to avoid writing 0xFF bytes as it
	 * may have some side-effects. And not only this: it is important not
	 * to include those 0xFFs in the CRC, because later they may be filled
	 * with data.
*/
if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
aldata_size = data_size =
ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
cond_resched();
crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
cond_resched();
/*
* It may turn out to be that the whole @from physical eraseblock
* contains only 0xFF bytes. Then we have to only write the VID header
* and do not write any data. This also means we should not set
* @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
*/
if (data_size > 0) {
vid_hdr->copy_flag = 1;
vid_hdr->data_size = cpu_to_be32(data_size);
vid_hdr->data_crc = cpu_to_be32(crc);
}
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, to, vidb);
if (err) {
if (err == -EIO)
err = MOVE_TARGET_WR_ERR;
goto out_unlock_buf;
}
cond_resched();
/* Read the VID header back and check if it was written correctly */
err = ubi_io_read_vid_hdr(ubi, to, vidb, 1);
if (err) {
if (err != UBI_IO_BITFLIPS) {
ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
err, to);
if (is_error_sane(err))
err = MOVE_TARGET_RD_ERR;
} else
err = MOVE_TARGET_BITFLIPS;
goto out_unlock_buf;
}
if (data_size > 0) {
err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
if (err) {
if (err == -EIO)
err = MOVE_TARGET_WR_ERR;
goto out_unlock_buf;
}
cond_resched();
}
ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
vol->eba_tbl->entries[lnum].pnum = to;
out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
out_unlock_leb:
leb_write_unlock(ubi, vol_id, lnum);
return err;
}
/**
* print_rsvd_warning - warn about not having enough reserved PEBs.
* @ubi: UBI device description object
* @ai: UBI attach info object
*
* This is a helper function for 'ubi_eba_init()' which is called when UBI
* cannot reserve enough PEBs for bad block handling. This function makes a
* decision whether we have to print a warning or not. The algorithm is as
* follows:
* o if this is a new UBI image, then just print the warning
 * o if this is a UBI image which has already been used for some time, print
 *   a warning only if we can reserve less than 10% of the expected amount of
 *   the reserved PEBs.
*
* The idea is that when UBI is used, PEBs become bad, and the reserved pool
* of PEBs becomes smaller, which is normal and we do not want to scare users
* with a warning every time they attach the MTD device. This was an issue
* reported by real users.
*/
static void print_rsvd_warning(struct ubi_device *ubi,
struct ubi_attach_info *ai)
{
/*
	 * The 1 << 18 (262144) threshold is picked arbitrarily - just a
	 * reasonably large sequence number to distinguish between newly
	 * flashed and used images.
*/
if (ai->max_sqnum > (1 << 18)) {
int min = ubi->beb_rsvd_level / 10;
if (!min)
min = 1;
if (ubi->beb_rsvd_pebs > min)
return;
}
ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
if (ubi->corr_peb_count)
ubi_warn(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
}
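/*
 * Worked example (assumed numbers): with beb_rsvd_level == 40 the warning
 * threshold is max(40 / 10, 1) == 4, so a used image (max_sqnum > 1 << 18)
 * stays quiet as long as at least 5 reserved PEBs remain.
 */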
/**
* self_check_eba - run a self check on the EBA table constructed by fastmap.
* @ubi: UBI device description object
* @ai_fastmap: UBI attach info object created by fastmap
* @ai_scan: UBI attach info object created by scanning
*
* Returns < 0 in case of an internal error, 0 otherwise.
* If a bad EBA table entry was found it will be printed out and
* ubi_assert() triggers.
*/
int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
struct ubi_attach_info *ai_scan)
{
int i, j, num_volumes, ret = 0;
int **scan_eba, **fm_eba;
struct ubi_ainf_volume *av;
struct ubi_volume *vol;
struct ubi_ainf_peb *aeb;
struct rb_node *rb;
num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
scan_eba = kmalloc_array(num_volumes, sizeof(*scan_eba), GFP_KERNEL);
if (!scan_eba)
return -ENOMEM;
fm_eba = kmalloc_array(num_volumes, sizeof(*fm_eba), GFP_KERNEL);
if (!fm_eba) {
kfree(scan_eba);
return -ENOMEM;
}
for (i = 0; i < num_volumes; i++) {
vol = ubi->volumes[i];
if (!vol)
continue;
scan_eba[i] = kmalloc_array(vol->reserved_pebs,
sizeof(**scan_eba),
GFP_KERNEL);
if (!scan_eba[i]) {
ret = -ENOMEM;
goto out_free;
}
fm_eba[i] = kmalloc_array(vol->reserved_pebs,
sizeof(**fm_eba),
GFP_KERNEL);
if (!fm_eba[i]) {
ret = -ENOMEM;
goto out_free;
}
for (j = 0; j < vol->reserved_pebs; j++)
scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
if (!av)
continue;
ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
scan_eba[i][aeb->lnum] = aeb->pnum;
av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
if (!av)
continue;
ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
fm_eba[i][aeb->lnum] = aeb->pnum;
for (j = 0; j < vol->reserved_pebs; j++) {
if (scan_eba[i][j] != fm_eba[i][j]) {
if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
fm_eba[i][j] == UBI_LEB_UNMAPPED)
continue;
ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
vol->vol_id, j, fm_eba[i][j],
scan_eba[i][j]);
ubi_assert(0);
}
}
}
out_free:
for (i = 0; i < num_volumes; i++) {
if (!ubi->volumes[i])
continue;
kfree(scan_eba[i]);
kfree(fm_eba[i]);
}
kfree(scan_eba);
kfree(fm_eba);
return ret;
}
/**
* ubi_eba_init - initialize the EBA sub-system using attaching information.
* @ubi: UBI device description object
* @ai: attaching information
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int i, err, num_volumes;
struct ubi_ainf_volume *av;
struct ubi_volume *vol;
struct ubi_ainf_peb *aeb;
struct rb_node *rb;
dbg_eba("initialize EBA sub-system");
spin_lock_init(&ubi->ltree_lock);
mutex_init(&ubi->alc_mutex);
ubi->ltree = RB_ROOT;
ubi->global_sqnum = ai->max_sqnum + 1;
num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
for (i = 0; i < num_volumes; i++) {
struct ubi_eba_table *tbl;
vol = ubi->volumes[i];
if (!vol)
continue;
cond_resched();
tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
if (IS_ERR(tbl)) {
err = PTR_ERR(tbl);
goto out_free;
}
ubi_eba_replace_table(vol, tbl);
av = ubi_find_av(ai, idx2vol_id(ubi, i));
if (!av)
continue;
ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
if (aeb->lnum >= vol->reserved_pebs) {
/*
* This may happen in case of an unclean reboot
* during re-size.
*/
ubi_move_aeb_to_list(av, aeb, &ai->erase);
} else {
struct ubi_eba_entry *entry;
entry = &vol->eba_tbl->entries[aeb->lnum];
entry->pnum = aeb->pnum;
}
}
}
if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
ubi->avail_pebs, EBA_RESERVED_PEBS);
if (ubi->corr_peb_count)
ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
err = -ENOSPC;
goto out_free;
}
ubi->avail_pebs -= EBA_RESERVED_PEBS;
ubi->rsvd_pebs += EBA_RESERVED_PEBS;
if (ubi->bad_allowed) {
ubi_calculate_reserved(ubi);
if (ubi->avail_pebs < ubi->beb_rsvd_level) {
			/* Not enough free physical eraseblocks */
ubi->beb_rsvd_pebs = ubi->avail_pebs;
print_rsvd_warning(ubi, ai);
} else
ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
ubi->avail_pebs -= ubi->beb_rsvd_pebs;
ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
}
dbg_eba("EBA sub-system is initialized");
return 0;
out_free:
for (i = 0; i < num_volumes; i++) {
if (!ubi->volumes[i])
continue;
ubi_eba_replace_table(ubi->volumes[i], NULL);
}
return err;
}
| linux-master | drivers/mtd/ubi/eba.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* LPDDR flash memory device operations. This module provides read, write,
* erase, lock/unlock support for LPDDR flash memories
* (C) 2008 Korolev Alexey <[email protected]>
* (C) 2008 Vasiliy Leonenko <[email protected]>
* Many thanks to Roman Borisov for initial enabling
*
* TODO:
* Implement VPP management
* Implement XIP support
* Implement OTP support
*/
#include <linux/mtd/pfow.h>
#include <linux/mtd/qinfo.h>
#include <linux/slab.h>
#include <linux/module.h>
static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
size_t *retlen, u_char *buf);
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
size_t len, size_t *retlen, const u_char *buf);
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen);
static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
size_t *retlen, void **mtdbuf, resource_size_t *phys);
static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
static int get_chip(struct map_info *map, struct flchip *chip, int mode);
static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
static void put_chip(struct map_info *map, struct flchip *chip);
struct mtd_info *lpddr_cmdset(struct map_info *map)
{
struct lpddr_private *lpddr = map->fldrv_priv;
struct flchip_shared *shared;
struct flchip *chip;
struct mtd_info *mtd;
int numchips;
int i, j;
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
if (!mtd)
return NULL;
mtd->priv = map;
mtd->type = MTD_NORFLASH;
/* Fill in the default mtd operations */
mtd->_read = lpddr_read;
mtd->flags = MTD_CAP_NORFLASH;
mtd->flags &= ~MTD_BIT_WRITEABLE;
mtd->_erase = lpddr_erase;
mtd->_write = lpddr_write_buffers;
mtd->_writev = lpddr_writev;
mtd->_lock = lpddr_lock;
mtd->_unlock = lpddr_unlock;
if (map_is_linear(map)) {
mtd->_point = lpddr_point;
mtd->_unpoint = lpddr_unpoint;
}
mtd->size = 1 << lpddr->qinfo->DevSizeShift;
mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared),
GFP_KERNEL);
if (!shared) {
kfree(mtd);
return NULL;
}
chip = &lpddr->chips[0];
numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
for (i = 0; i < numchips; i++) {
shared[i].writing = shared[i].erasing = NULL;
mutex_init(&shared[i].lock);
for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
*chip = lpddr->chips[i];
chip->start += j << lpddr->chipshift;
chip->oldstate = chip->state = FL_READY;
chip->priv = &shared[i];
/* those should be reset too since
they create memory references. */
init_waitqueue_head(&chip->wq);
mutex_init(&chip->mutex);
chip++;
}
}
return mtd;
}
EXPORT_SYMBOL(lpddr_cmdset);
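/*
 * Illustrative sketch (assumption, not part of this module): a map driver
 * is expected to probe the chip, fill in the map_info and its lpddr_private,
 * and only then hand the map over before registering the MTD:
 *
 *	mtd = lpddr_cmdset(map);
 *	if (!mtd)
 *		goto err_probe;
 *	ret = mtd_device_register(mtd, NULL, 0);
 */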
static void print_drs_error(unsigned int dsr)
{
int prog_status = (dsr & DSR_RPS) >> 8;
if (!(dsr & DSR_AVAILABLE))
pr_notice("DSR.15: (0) Device not Available\n");
if ((prog_status & 0x03) == 0x03)
pr_notice("DSR.9,8: (11) Attempt to program invalid half with 41h command\n");
else if (prog_status & 0x02)
pr_notice("DSR.9,8: (10) Object Mode Program attempt in region with Control Mode data\n");
else if (prog_status & 0x01)
pr_notice("DSR.9,8: (01) Program attempt in region with Object Mode data\n");
if (!(dsr & DSR_READY_STATUS))
pr_notice("DSR.7: (0) Device is Busy\n");
if (dsr & DSR_ESS)
pr_notice("DSR.6: (1) Erase Suspended\n");
if (dsr & DSR_ERASE_STATUS)
pr_notice("DSR.5: (1) Erase/Blank check error\n");
if (dsr & DSR_PROGRAM_STATUS)
pr_notice("DSR.4: (1) Program Error\n");
if (dsr & DSR_VPPS)
pr_notice("DSR.3: (1) Vpp low detect, operation aborted\n");
if (dsr & DSR_PSS)
pr_notice("DSR.2: (1) Program suspended\n");
if (dsr & DSR_DPS)
pr_notice("DSR.1: (1) Aborted Erase/Program attempt on locked block\n");
}
static int wait_for_ready(struct map_info *map, struct flchip *chip,
unsigned int chip_op_time)
{
unsigned int timeo, reset_timeo, sleep_time;
unsigned int dsr;
flstate_t chip_state = chip->state;
int ret = 0;
/* set our timeout to 8 times the expected delay */
timeo = chip_op_time * 8;
if (!timeo)
timeo = 500000;
reset_timeo = timeo;
sleep_time = chip_op_time / 2;
for (;;) {
dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
if (dsr & DSR_READY_STATUS)
break;
if (!timeo) {
			printk(KERN_ERR "%s: flash timeout error, state %d\n",
map->name, chip_state);
ret = -ETIME;
break;
}
/* OK Still waiting. Drop the lock, wait a while and retry. */
mutex_unlock(&chip->mutex);
if (sleep_time >= 1000000/HZ) {
/*
* Half of the normal delay still remaining
* can be performed with a sleeping delay instead
* of busy waiting.
*/
msleep(sleep_time/1000);
timeo -= sleep_time;
sleep_time = 1000000/HZ;
} else {
udelay(1);
cond_resched();
timeo--;
}
mutex_lock(&chip->mutex);
while (chip->state != chip_state) {
/* Someone's suspended the operation: sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
}
if (chip->erase_suspended || chip->write_suspended) {
/* Suspend has occurred while sleep: reset timeout */
timeo = reset_timeo;
chip->erase_suspended = chip->write_suspended = 0;
}
}
/* check status for errors */
if (dsr & DSR_ERR) {
/* Clear DSR*/
map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
printk(KERN_WARNING"%s: Bad status on wait: 0x%x \n",
map->name, dsr);
print_drs_error(dsr);
ret = -EIO;
}
chip->state = FL_READY;
return ret;
}
static int get_chip(struct map_info *map, struct flchip *chip, int mode)
{
int ret;
DECLARE_WAITQUEUE(wait, current);
retry:
if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
&& chip->state != FL_SYNCING) {
/*
		 * OK. There is a possibility of contention on the write/erase
* operations which are global to the real chip and not per
* partition. So let's fight it over in the partition which
* currently has authority on the operation.
*
* The rules are as follows:
*
* - any write operation must own shared->writing.
*
* - any erase operation must own _both_ shared->writing and
* shared->erasing.
*
		 * - contention arbitration is handled in the owner's context.
*
* The 'shared' struct can be read and/or written only when
* its lock is taken.
*/
struct flchip_shared *shared = chip->priv;
struct flchip *contender;
mutex_lock(&shared->lock);
contender = shared->writing;
if (contender && contender != chip) {
/*
* The engine to perform desired operation on this
* partition is already in use by someone else.
* Let's fight over it in the context of the chip
* currently using it. If it is possible to suspend,
* that other partition will do just that, otherwise
* it'll happily send us to sleep. In any case, when
* get_chip returns success we're clear to go ahead.
*/
ret = mutex_trylock(&contender->mutex);
mutex_unlock(&shared->lock);
if (!ret)
goto retry;
mutex_unlock(&chip->mutex);
ret = chip_ready(map, contender, mode);
mutex_lock(&chip->mutex);
if (ret == -EAGAIN) {
mutex_unlock(&contender->mutex);
goto retry;
}
if (ret) {
mutex_unlock(&contender->mutex);
return ret;
}
mutex_lock(&shared->lock);
			/* We should not own the chip if it is already in
			 * FL_SYNCING state. Put the contender and retry. */
if (chip->state == FL_SYNCING) {
put_chip(map, contender);
mutex_unlock(&contender->mutex);
goto retry;
}
mutex_unlock(&contender->mutex);
}
		/* Check if we have a suspended erase on this chip.
		   Must sleep in such a case. */
if (mode == FL_ERASING && shared->erasing
&& shared->erasing->oldstate == FL_ERASING) {
mutex_unlock(&shared->lock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
goto retry;
}
/* We now own it */
shared->writing = chip;
if (mode == FL_ERASING)
shared->erasing = chip;
mutex_unlock(&shared->lock);
}
ret = chip_ready(map, chip, mode);
if (ret == -EAGAIN)
goto retry;
return ret;
}
static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
{
struct lpddr_private *lpddr = map->fldrv_priv;
int ret = 0;
DECLARE_WAITQUEUE(wait, current);
/* Prevent setting state FL_SYNCING for chip in suspended state. */
if (FL_SYNCING == mode && FL_READY != chip->oldstate)
goto sleep;
switch (chip->state) {
case FL_READY:
case FL_JEDEC_QUERY:
return 0;
case FL_ERASING:
if (!lpddr->qinfo->SuspEraseSupp ||
!(mode == FL_READY || mode == FL_POINT))
goto sleep;
map_write(map, CMD(LPDDR_SUSPEND),
map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
chip->oldstate = FL_ERASING;
chip->state = FL_ERASE_SUSPENDING;
ret = wait_for_ready(map, chip, 0);
if (ret) {
			/* Oops, something went wrong. */
			/* Resume and pretend we weren't here. */
			put_chip(map, chip);
			printk(KERN_ERR "%s: suspend operation failed, "
					"state may be wrong\n", map->name);
			return -EIO;
return -EIO;
}
chip->erase_suspended = 1;
chip->state = FL_READY;
return 0;
/* Erase suspend */
case FL_POINT:
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
return 0;
fallthrough;
default:
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
return -EAGAIN;
}
}
static void put_chip(struct map_info *map, struct flchip *chip)
{
if (chip->priv) {
struct flchip_shared *shared = chip->priv;
mutex_lock(&shared->lock);
if (shared->writing == chip && chip->oldstate == FL_READY) {
/* We own the ability to write, but we're done */
shared->writing = shared->erasing;
if (shared->writing && shared->writing != chip) {
/* give back the ownership */
struct flchip *loaner = shared->writing;
mutex_lock(&loaner->mutex);
mutex_unlock(&shared->lock);
mutex_unlock(&chip->mutex);
put_chip(map, loaner);
mutex_lock(&chip->mutex);
mutex_unlock(&loaner->mutex);
wake_up(&chip->wq);
return;
}
shared->erasing = NULL;
shared->writing = NULL;
} else if (shared->erasing == chip && shared->writing != chip) {
/*
* We own the ability to erase without the ability
* to write, which means the erase was suspended
* and some other partition is currently writing.
* Don't let the switch below mess things up since
* we don't have ownership to resume anything.
*/
mutex_unlock(&shared->lock);
wake_up(&chip->wq);
return;
}
mutex_unlock(&shared->lock);
}
switch (chip->oldstate) {
case FL_ERASING:
map_write(map, CMD(LPDDR_RESUME),
map->pfow_base + PFOW_COMMAND_CODE);
map_write(map, CMD(LPDDR_START_EXECUTION),
map->pfow_base + PFOW_COMMAND_EXECUTE);
chip->oldstate = FL_READY;
chip->state = FL_ERASING;
break;
case FL_READY:
break;
default:
printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
map->name, chip->oldstate);
}
wake_up(&chip->wq);
}
static int do_write_buffer(struct map_info *map, struct flchip *chip,
unsigned long adr, const struct kvec **pvec,
unsigned long *pvec_seek, int len)
{
struct lpddr_private *lpddr = map->fldrv_priv;
map_word datum;
int ret, wbufsize, word_gap;
const struct kvec *vec;
unsigned long vec_seek;
unsigned long prog_buf_ofs;
wbufsize = 1 << lpddr->qinfo->BufSizeShift;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_WRITING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
/* Figure out the number of words to write */
word_gap = (-adr & (map_bankwidth(map)-1));
if (word_gap) {
word_gap = map_bankwidth(map) - word_gap;
adr -= word_gap;
datum = map_word_ff(map);
}
/* Write data */
/* Get the program buffer offset from PFOW register data first*/
prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
vec = *pvec;
vec_seek = *pvec_seek;
do {
int n = map_bankwidth(map) - word_gap;
if (n > vec->iov_len - vec_seek)
n = vec->iov_len - vec_seek;
if (n > len)
n = len;
if (!word_gap && (len < map_bankwidth(map)))
datum = map_word_ff(map);
datum = map_word_load_partial(map, datum,
vec->iov_base + vec_seek, word_gap, n);
len -= n;
word_gap += n;
if (!len || word_gap == map_bankwidth(map)) {
map_write(map, datum, prog_buf_ofs);
prog_buf_ofs += map_bankwidth(map);
word_gap = 0;
}
vec_seek += n;
if (vec_seek == vec->iov_len) {
vec++;
vec_seek = 0;
}
} while (len);
*pvec = vec;
*pvec_seek = vec_seek;
/* GO GO GO */
send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
chip->state = FL_WRITING;
ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
if (ret) {
		printk(KERN_WARNING "%s: buffer program error %d at 0x%lx\n",
			map->name, ret, adr);
goto out;
}
out: put_chip(map, chip);
mutex_unlock(&chip->mutex);
return ret;
}
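/*
 * Worked example for the alignment logic above (assumed numbers): with a
 * 4-byte bank width and adr == 0x1003, the initial word_gap is
 * (-0x1003) & 3 == 1, which is rewritten to 4 - 1 == 3 and adr is rounded
 * down to 0x1000; the first map_word is pre-filled with 0xFF and only its
 * last byte is taken from the caller's data.
 */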
static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
{
struct map_info *map = mtd->priv;
struct lpddr_private *lpddr = map->fldrv_priv;
int chipnum = adr >> lpddr->chipshift;
struct flchip *chip = &lpddr->chips[chipnum];
int ret;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_ERASING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
chip->state = FL_ERASING;
ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
if (ret) {
		printk(KERN_WARNING "%s: erase block error %d at 0x%llx\n",
			map->name, ret, adr);
goto out;
}
out: put_chip(map, chip);
mutex_unlock(&chip->mutex);
return ret;
}
static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
size_t *retlen, u_char *buf)
{
struct map_info *map = mtd->priv;
struct lpddr_private *lpddr = map->fldrv_priv;
int chipnum = adr >> lpddr->chipshift;
struct flchip *chip = &lpddr->chips[chipnum];
int ret = 0;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_READY);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
map_copy_from(map, buf, adr, len);
*retlen = len;
put_chip(map, chip);
mutex_unlock(&chip->mutex);
return ret;
}
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
size_t *retlen, void **mtdbuf, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
struct lpddr_private *lpddr = map->fldrv_priv;
int chipnum = adr >> lpddr->chipshift;
unsigned long ofs, last_end = 0;
struct flchip *chip = &lpddr->chips[chipnum];
int ret = 0;
if (!map->virt)
return -EINVAL;
	/* ofs: offset within the first chip at which the first read should start */
ofs = adr - (chipnum << lpddr->chipshift);
*mtdbuf = (void *)map->virt + chip->start + ofs;
while (len) {
unsigned long thislen;
if (chipnum >= lpddr->numchips)
break;
/* We cannot point across chips that are virtually disjoint */
if (!last_end)
last_end = chip->start;
else if (chip->start != last_end)
break;
if ((len + ofs - 1) >> lpddr->chipshift)
thislen = (1<<lpddr->chipshift) - ofs;
else
thislen = len;
/* get the chip */
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_POINT);
mutex_unlock(&chip->mutex);
if (ret)
break;
chip->state = FL_POINT;
chip->ref_point_counter++;
*retlen += thislen;
len -= thislen;
ofs = 0;
last_end += 1 << lpddr->chipshift;
chipnum++;
chip = &lpddr->chips[chipnum];
}
return 0;
}
static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
{
struct map_info *map = mtd->priv;
struct lpddr_private *lpddr = map->fldrv_priv;
int chipnum = adr >> lpddr->chipshift, err = 0;
unsigned long ofs;
	/* ofs: offset within the first chip at which the first read should start */
ofs = adr - (chipnum << lpddr->chipshift);
while (len) {
unsigned long thislen;
struct flchip *chip;
chip = &lpddr->chips[chipnum];
if (chipnum >= lpddr->numchips)
break;
if ((len + ofs - 1) >> lpddr->chipshift)
thislen = (1<<lpddr->chipshift) - ofs;
else
thislen = len;
mutex_lock(&chip->mutex);
if (chip->state == FL_POINT) {
chip->ref_point_counter--;
if (chip->ref_point_counter == 0)
chip->state = FL_READY;
} else {
			printk(KERN_WARNING "%s: Warning: unpoint called on non-pointed region\n",
				map->name);
err = -EINVAL;
}
put_chip(map, chip);
mutex_unlock(&chip->mutex);
len -= thislen;
ofs = 0;
chipnum++;
}
return err;
}
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct kvec vec;
vec.iov_base = (void *) buf;
vec.iov_len = len;
return lpddr_writev(mtd, &vec, 1, to, retlen);
}
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
struct map_info *map = mtd->priv;
struct lpddr_private *lpddr = map->fldrv_priv;
int ret = 0;
int chipnum;
unsigned long ofs, vec_seek, i;
int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
size_t len = 0;
for (i = 0; i < count; i++)
len += vecs[i].iov_len;
if (!len)
return 0;
chipnum = to >> lpddr->chipshift;
ofs = to;
vec_seek = 0;
do {
/* We must not cross write block boundaries */
int size = wbufsize - (ofs & (wbufsize-1));
if (size > len)
size = len;
ret = do_write_buffer(map, &lpddr->chips[chipnum],
ofs, &vecs, &vec_seek, size);
if (ret)
return ret;
ofs += size;
(*retlen) += size;
len -= size;
/* Be nice and reschedule with the chip in a usable
* state for other processes */
cond_resched();
} while (len);
return 0;
}
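/*
 * Worked example (assumed numbers): with a 32-byte write buffer and a
 * 100-byte write starting at ofs == 0x1C, the first chunk is limited to
 * 32 - (0x1C & 31) == 4 bytes so that no program operation crosses a write
 * block boundary; the remaining 96 bytes then go out as 32 + 32 + 32.
 */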
static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
{
unsigned long ofs, len;
int ret;
struct map_info *map = mtd->priv;
struct lpddr_private *lpddr = map->fldrv_priv;
int size = 1 << lpddr->qinfo->UniformBlockSizeShift;
ofs = instr->addr;
len = instr->len;
while (len > 0) {
ret = do_erase_oneblock(mtd, ofs);
if (ret)
return ret;
ofs += size;
len -= size;
}
return 0;
}
#define DO_XXLOCK_LOCK 1
#define DO_XXLOCK_UNLOCK 2
static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
{
int ret = 0;
struct map_info *map = mtd->priv;
struct lpddr_private *lpddr = map->fldrv_priv;
int chipnum = adr >> lpddr->chipshift;
struct flchip *chip = &lpddr->chips[chipnum];
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_LOCKING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
if (thunk == DO_XXLOCK_LOCK) {
send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
chip->state = FL_LOCKING;
} else if (thunk == DO_XXLOCK_UNLOCK) {
send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
chip->state = FL_UNLOCKING;
} else
BUG();
ret = wait_for_ready(map, chip, 1);
if (ret) {
		printk(KERN_ERR "%s: block lock/unlock error, status %d\n",
			map->name, ret);
goto out;
}
out: put_chip(map, chip);
mutex_unlock(&chip->mutex);
return ret;
}
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
}
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexey Korolev <[email protected]>");
MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");
| linux-master | drivers/mtd/lpddr/lpddr_cmds.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* LPDDR2-NVM MTD driver. This module provides read, write, erase, lock/unlock
* support for LPDDR2-NVM PCM memories
*
* Copyright © 2012 Micron Technology, Inc.
*
* Vincenzo Aliberti <[email protected]>
* Domenico Manna <[email protected]>
* Many thanks to Andrea Vigilante for initial enabling
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/err.h>
/* Parameters */
#define ERASE_BLOCKSIZE (0x00020000/2) /* in Word */
#define WRITE_BUFFSIZE (0x00000400/2) /* in Word */
#define OW_BASE_ADDRESS 0x00000000 /* OW offset */
#define BUS_WIDTH 0x00000020 /* x32 devices */
/* PFOW symbols address offset */
#define PFOW_QUERY_STRING_P (0x0000/2) /* in Word */
#define PFOW_QUERY_STRING_F (0x0002/2) /* in Word */
#define PFOW_QUERY_STRING_O (0x0004/2) /* in Word */
#define PFOW_QUERY_STRING_W (0x0006/2) /* in Word */
/* OW registers address */
#define CMD_CODE_OFS (0x0080/2) /* in Word */
#define CMD_DATA_OFS (0x0084/2) /* in Word */
#define CMD_ADD_L_OFS (0x0088/2) /* in Word */
#define CMD_ADD_H_OFS (0x008A/2) /* in Word */
#define MPR_L_OFS (0x0090/2) /* in Word */
#define MPR_H_OFS (0x0092/2) /* in Word */
#define CMD_EXEC_OFS (0x00C0/2) /* in Word */
#define STATUS_REG_OFS (0x00CC/2) /* in Word */
#define PRG_BUFFER_OFS (0x0010/2) /* in Word */
/* Datamask */
#define MR_CFGMASK 0x8000
#define SR_OK_DATAMASK 0x0080
/* LPDDR2-NVM Commands */
#define LPDDR2_NVM_LOCK 0x0061
#define LPDDR2_NVM_UNLOCK 0x0062
#define LPDDR2_NVM_SW_PROGRAM 0x0041
#define LPDDR2_NVM_SW_OVERWRITE 0x0042
#define LPDDR2_NVM_BUF_PROGRAM 0x00E9
#define LPDDR2_NVM_BUF_OVERWRITE 0x00EA
#define LPDDR2_NVM_ERASE 0x0020
/* LPDDR2-NVM Registers offset */
#define LPDDR2_MODE_REG_DATA 0x0040
#define LPDDR2_MODE_REG_CFG 0x0050
/*
 * Internal Type Definitions
 * pcm_int_data contains memory controller details:
 * @ctl_regs  : control register base (LPDDR2_MODE_REG_*) after remapping
 * @bus_width : memory bus-width (eg: x16 2 Bytes, x32 4 Bytes)
 */
struct pcm_int_data {
void __iomem *ctl_regs;
int bus_width;
};
static DEFINE_MUTEX(lpdd2_nvm_mutex);
/*
* Build a map_word starting from an u_long
*/
static inline map_word build_map_word(u_long myword)
{
map_word val = { {0} };
val.x[0] = myword;
return val;
}
/*
* Build Mode Register Configuration DataMask based on device bus-width
*/
static inline u_int build_mr_cfgmask(u_int bus_width)
{
u_int val = MR_CFGMASK;
if (bus_width == 0x0004) /* x32 device */
val = val << 16;
return val;
}
/*
* Build Status Register OK DataMask based on device bus-width
*/
static inline u_int build_sr_ok_datamask(u_int bus_width)
{
u_int val = SR_OK_DATAMASK;
if (bus_width == 0x0004) /* x32 device */
val = (val << 16)+val;
return val;
}
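/*
 * Worked example (values follow from the definitions above): for an x16
 * device the masks stay 16 bit wide (0x8000 and 0x0080); for an x32 bus
 * (bus_width == 0x0004) they become 0x80000000 and 0x00800080, so the
 * status of both stacked halves is checked at once.
 */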
/*
* Evaluates Overlay Window Control Registers address
*/
static inline u_long ow_reg_add(struct map_info *map, u_long offset)
{
u_long val = 0;
struct pcm_int_data *pcm_data = map->fldrv_priv;
val = map->pfow_base + offset*pcm_data->bus_width;
return val;
}
/*
* Enable lpddr2-nvm Overlay Window
* Overlay Window is a memory mapped area containing all LPDDR2-NVM registers
 * used by device commands as well as user-visible resources like Device Status
* Register, Device ID, etc
*/
static inline void ow_enable(struct map_info *map)
{
struct pcm_int_data *pcm_data = map->fldrv_priv;
writel_relaxed(build_mr_cfgmask(pcm_data->bus_width) | 0x18,
pcm_data->ctl_regs + LPDDR2_MODE_REG_CFG);
writel_relaxed(0x01, pcm_data->ctl_regs + LPDDR2_MODE_REG_DATA);
}
/*
* Disable lpddr2-nvm Overlay Window
* Overlay Window is a memory mapped area containing all LPDDR2-NVM registers
 * used by device commands as well as user-visible resources like Device Status
* Register, Device ID, etc
*/
static inline void ow_disable(struct map_info *map)
{
struct pcm_int_data *pcm_data = map->fldrv_priv;
writel_relaxed(build_mr_cfgmask(pcm_data->bus_width) | 0x18,
pcm_data->ctl_regs + LPDDR2_MODE_REG_CFG);
writel_relaxed(0x02, pcm_data->ctl_regs + LPDDR2_MODE_REG_DATA);
}
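/*
 * The helpers below bracket every device command with this sequence
 * (a sketch of the locking/windowing protocol used throughout):
 *
 *   mutex_lock(&lpdd2_nvm_mutex);
 *   ow_enable(map);
 *   ... program the OW registers and execute the command ...
 *   ow_disable(map);
 *   mutex_unlock(&lpdd2_nvm_mutex);
 */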
/*
* Execute lpddr2-nvm operations
*/
static int lpddr2_nvm_do_op(struct map_info *map, u_long cmd_code,
u_long cmd_data, u_long cmd_add, u_long cmd_mpr, u_char *buf)
{
map_word add_l = { {0} }, add_h = { {0} }, mpr_l = { {0} },
mpr_h = { {0} }, data_l = { {0} }, cmd = { {0} },
exec_cmd = { {0} }, sr;
map_word data_h = { {0} }; /* only for 2x x16 devices stacked */
u_long i, status_reg, prg_buff_ofs;
struct pcm_int_data *pcm_data = map->fldrv_priv;
u_int sr_ok_datamask = build_sr_ok_datamask(pcm_data->bus_width);
/* Builds low and high words for OW Control Registers */
add_l.x[0] = cmd_add & 0x0000FFFF;
add_h.x[0] = (cmd_add >> 16) & 0x0000FFFF;
mpr_l.x[0] = cmd_mpr & 0x0000FFFF;
mpr_h.x[0] = (cmd_mpr >> 16) & 0x0000FFFF;
cmd.x[0] = cmd_code & 0x0000FFFF;
exec_cmd.x[0] = 0x0001;
data_l.x[0] = cmd_data & 0x0000FFFF;
data_h.x[0] = (cmd_data >> 16) & 0x0000FFFF; /* only for 2x x16 */
/* Set Overlay Window Control Registers */
map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS));
map_write(map, data_l, ow_reg_add(map, CMD_DATA_OFS));
map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS));
map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS));
map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS));
map_write(map, mpr_h, ow_reg_add(map, MPR_H_OFS));
if (pcm_data->bus_width == 0x0004) { /* 2x16 devices stacked */
map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS) + 2);
map_write(map, data_h, ow_reg_add(map, CMD_DATA_OFS) + 2);
map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS) + 2);
map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS) + 2);
map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS) + 2);
map_write(map, mpr_h, ow_reg_add(map, MPR_H_OFS) + 2);
}
/* Fill Program Buffer */
if ((cmd_code == LPDDR2_NVM_BUF_PROGRAM) ||
(cmd_code == LPDDR2_NVM_BUF_OVERWRITE)) {
prg_buff_ofs = (map_read(map,
ow_reg_add(map, PRG_BUFFER_OFS))).x[0];
for (i = 0; i < cmd_mpr; i++) {
map_write(map, build_map_word(buf[i]), map->pfow_base +
prg_buff_ofs + i);
}
}
/* Command Execute */
map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS));
if (pcm_data->bus_width == 0x0004) /* 2x16 devices stacked */
map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS) + 2);
/* Status Register Check */
do {
sr = map_read(map, ow_reg_add(map, STATUS_REG_OFS));
status_reg = sr.x[0];
if (pcm_data->bus_width == 0x0004) {/* 2x16 devices stacked */
sr = map_read(map, ow_reg_add(map,
STATUS_REG_OFS) + 2);
status_reg += sr.x[0] << 16;
}
} while ((status_reg & sr_ok_datamask) != sr_ok_datamask);
return (((status_reg & sr_ok_datamask) == sr_ok_datamask) ? 0 : -EIO);
}
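/*
 * For example (a sketch): a single block erase issued through the helper
 * above uses the block address for both the command address and the MPR:
 *
 *   ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_ERASE, 0x00, add, add, NULL);
 */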
/*
* Execute lpddr2-nvm operations at block level
*/
static int lpddr2_nvm_do_block_op(struct mtd_info *mtd, loff_t start_add,
uint64_t len, u_char block_op)
{
struct map_info *map = mtd->priv;
u_long add, end_add;
int ret = 0;
mutex_lock(&lpdd2_nvm_mutex);
ow_enable(map);
add = start_add;
end_add = add + len;
do {
ret = lpddr2_nvm_do_op(map, block_op, 0x00, add, add, NULL);
if (ret)
goto out;
add += mtd->erasesize;
} while (add < end_add);
out:
ow_disable(map);
mutex_unlock(&lpdd2_nvm_mutex);
return ret;
}
/*
* verify presence of PFOW string
*/
static int lpddr2_nvm_pfow_present(struct map_info *map)
{
map_word pfow_val[4];
unsigned int found = 1;
mutex_lock(&lpdd2_nvm_mutex);
ow_enable(map);
/* Read the PFOW query string from the device */
pfow_val[0] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_P));
pfow_val[1] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_F));
pfow_val[2] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_O));
pfow_val[3] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_W));
/* Verify the string loaded vs expected */
if (!map_word_equal(map, build_map_word('P'), pfow_val[0]))
found = 0;
if (!map_word_equal(map, build_map_word('F'), pfow_val[1]))
found = 0;
if (!map_word_equal(map, build_map_word('O'), pfow_val[2]))
found = 0;
if (!map_word_equal(map, build_map_word('W'), pfow_val[3]))
found = 0;
ow_disable(map);
mutex_unlock(&lpdd2_nvm_mutex);
return found;
}
/*
* lpddr2_nvm driver read method
*/
static int lpddr2_nvm_read(struct mtd_info *mtd, loff_t start_add,
size_t len, size_t *retlen, u_char *buf)
{
struct map_info *map = mtd->priv;
mutex_lock(&lpdd2_nvm_mutex);
*retlen = len;
map_copy_from(map, buf, start_add, *retlen);
mutex_unlock(&lpdd2_nvm_mutex);
return 0;
}
/*
* lpddr2_nvm driver write method
*/
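/*
 * Write strategy (a sketch of the two paths below): addresses not aligned
 * to mtd->writesize are advanced one bus word at a time through
 * LPDDR2_NVM_SW_OVERWRITE; aligned stretches go through the program buffer
 * via LPDDR2_NVM_BUF_OVERWRITE, up to mtd->writesize bytes per operation.
 */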
static int lpddr2_nvm_write(struct mtd_info *mtd, loff_t start_add,
size_t len, size_t *retlen, const u_char *buf)
{
struct map_info *map = mtd->priv;
struct pcm_int_data *pcm_data = map->fldrv_priv;
u_long add, current_len, tot_len, target_len, my_data;
u_char *write_buf = (u_char *)buf;
int ret = 0;
mutex_lock(&lpdd2_nvm_mutex);
ow_enable(map);
/* Set start value for the variables */
add = start_add;
target_len = len;
tot_len = 0;
while (tot_len < target_len) {
if (!(IS_ALIGNED(add, mtd->writesize))) { /* do sw program */
my_data = write_buf[tot_len];
my_data += (write_buf[tot_len+1]) << 8;
if (pcm_data->bus_width == 0x0004) {/* 2x16 devices */
my_data += (write_buf[tot_len+2]) << 16;
my_data += (write_buf[tot_len+3]) << 24;
}
ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_SW_OVERWRITE,
my_data, add, 0x00, NULL);
if (ret)
goto out;
add += pcm_data->bus_width;
tot_len += pcm_data->bus_width;
} else { /* do buffer program */
current_len = min(target_len - tot_len,
(u_long) mtd->writesize);
ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_BUF_OVERWRITE,
0x00, add, current_len, write_buf + tot_len);
if (ret)
goto out;
add += current_len;
tot_len += current_len;
}
}
out:
*retlen = tot_len;
ow_disable(map);
mutex_unlock(&lpdd2_nvm_mutex);
return ret;
}
/*
* lpddr2_nvm driver erase method
*/
static int lpddr2_nvm_erase(struct mtd_info *mtd, struct erase_info *instr)
{
return lpddr2_nvm_do_block_op(mtd, instr->addr, instr->len,
LPDDR2_NVM_ERASE);
}
/*
* lpddr2_nvm driver unlock method
*/
static int lpddr2_nvm_unlock(struct mtd_info *mtd, loff_t start_add,
uint64_t len)
{
return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_UNLOCK);
}
/*
* lpddr2_nvm driver lock method
*/
static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add,
uint64_t len)
{
return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK);
}
static const struct mtd_info lpddr2_nvm_mtd_info = {
.type = MTD_RAM,
.writesize = 1,
.flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
._read = lpddr2_nvm_read,
._write = lpddr2_nvm_write,
._erase = lpddr2_nvm_erase,
._unlock = lpddr2_nvm_unlock,
._lock = lpddr2_nvm_lock,
};
/*
* lpddr2_nvm driver probe method
*/
static int lpddr2_nvm_probe(struct platform_device *pdev)
{
struct map_info *map;
struct mtd_info *mtd;
struct resource *add_range;
struct pcm_int_data *pcm_data;
/* Allocate the memory controller private data structure */
pcm_data = devm_kzalloc(&pdev->dev, sizeof(*pcm_data), GFP_KERNEL);
if (!pcm_data)
return -ENOMEM;
pcm_data->bus_width = BUS_WIDTH;
/* Allocate memory for map_info & mtd_info data structures */
map = devm_kzalloc(&pdev->dev, sizeof(*map), GFP_KERNEL);
if (!map)
return -ENOMEM;
mtd = devm_kzalloc(&pdev->dev, sizeof(*mtd), GFP_KERNEL);
if (!mtd)
return -ENOMEM;
/* lpddr2_nvm address range */
add_range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!add_range)
return -ENODEV;
/* Populate map_info data structure */
*map = (struct map_info) {
.virt = devm_ioremap_resource(&pdev->dev, add_range),
.name = pdev->dev.init_name,
.phys = add_range->start,
.size = resource_size(add_range),
.bankwidth = pcm_data->bus_width / 2,
.pfow_base = OW_BASE_ADDRESS,
.fldrv_priv = pcm_data,
};
if (IS_ERR(map->virt))
return PTR_ERR(map->virt);
simple_map_init(map); /* fill with default methods */
pcm_data->ctl_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(pcm_data->ctl_regs))
return PTR_ERR(pcm_data->ctl_regs);
/* Populate mtd_info data structure */
*mtd = lpddr2_nvm_mtd_info;
mtd->dev.parent = &pdev->dev;
mtd->name = pdev->dev.init_name;
mtd->priv = map;
mtd->size = resource_size(add_range);
mtd->erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width;
mtd->writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width;
/* Verify the presence of the device by looking for the PFOW string */
if (!lpddr2_nvm_pfow_present(map)) {
pr_err("device not recognized\n");
return -EINVAL;
}
/* Parse partitions and register the MTD device */
return mtd_device_register(mtd, NULL, 0);
}
/*
* lpddr2_nvm driver remove method
*/
static int lpddr2_nvm_remove(struct platform_device *pdev)
{
WARN_ON(mtd_device_unregister(dev_get_drvdata(&pdev->dev)));
return 0;
}
/* Initialize platform_driver data structure for lpddr2_nvm */
static struct platform_driver lpddr2_nvm_drv = {
.driver = {
.name = "lpddr2_nvm",
},
.probe = lpddr2_nvm_probe,
.remove = lpddr2_nvm_remove,
};
module_platform_driver(lpddr2_nvm_drv);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vincenzo Aliberti <[email protected]>");
MODULE_DESCRIPTION("MTD driver for LPDDR2-NVM PCM memories");
| linux-master | drivers/mtd/lpddr/lpddr2_nvm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Probing flash chips with QINFO records.
* (C) 2008 Korolev Alexey <[email protected]>
* (C) 2008 Vasiliy Leonenko <[email protected]>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/pfow.h>
#include <linux/mtd/qinfo.h>
static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr);
struct mtd_info *lpddr_probe(struct map_info *map);
static struct lpddr_private *lpddr_probe_chip(struct map_info *map);
static int lpddr_pfow_present(struct map_info *map,
struct lpddr_private *lpddr);
static struct qinfo_query_info qinfo_array[] = {
/* General device info */
{0, 0, "DevSizeShift", "Device size 2^n bytes"},
{0, 3, "BufSizeShift", "Program buffer size 2^n bytes"},
/* Erase block information */
{1, 1, "TotalBlocksNum", "Total number of blocks"},
{1, 2, "UniformBlockSizeShift", "Uniform block size 2^n bytes"},
/* Partition information */
{2, 1, "HWPartsNum", "Number of hardware partitions"},
/* Optional features */
{5, 1, "SuspEraseSupp", "Suspend erase supported"},
/* Operation typical time */
{10, 0, "SingleWordProgTime", "Single word program 2^n u-sec"},
{10, 1, "ProgBufferTime", "Program buffer write 2^n u-sec"},
{10, 2, "BlockEraseTime", "Block erase 2^n m-sec"},
{10, 3, "FullChipEraseTime", "Full chip erase 2^n m-sec"},
};
static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
{
int qinfo_lines = ARRAY_SIZE(qinfo_array);
int i;
int bankwidth = map_bankwidth(map) * 8;
int major, minor;
for (i = 0; i < qinfo_lines; i++) {
if (strcmp(id_str, qinfo_array[i].id_str) == 0) {
major = qinfo_array[i].major & ((1 << bankwidth) - 1);
minor = qinfo_array[i].minor & ((1 << bankwidth) - 1);
return minor | (major << bankwidth);
}
}
printk(KERN_ERR "%s: qinfo id string is wrong!\n", map->name);
BUG();
return -1;
}
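/*
 * Worked example (a sketch, assuming a 16-bit bank): "BlockEraseTime"
 * lives at major 10, minor 2, so the returned position is
 * 2 | (10 << 16) == 0x000A0002, which lpddr_info_query() splits back
 * into the PFOW_COMMAND_ADDRESS_L/H registers.
 */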
static uint16_t lpddr_info_query(struct map_info *map, char *id_str)
{
unsigned int dsr, val;
int bits_per_chip = map_bankwidth(map) * 8;
unsigned long adr = lpddr_get_qinforec_pos(map, id_str);
int attempts = 20;
/* Write a request for the PFOW record */
map_write(map, CMD(LPDDR_INFO_QUERY),
map->pfow_base + PFOW_COMMAND_CODE);
map_write(map, CMD(adr & ((1 << bits_per_chip) - 1)),
map->pfow_base + PFOW_COMMAND_ADDRESS_L);
map_write(map, CMD(adr >> bits_per_chip),
map->pfow_base + PFOW_COMMAND_ADDRESS_H);
map_write(map, CMD(LPDDR_START_EXECUTION),
map->pfow_base + PFOW_COMMAND_EXECUTE);
while ((attempts--) > 0) {
dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
if (dsr & DSR_READY_STATUS)
break;
udelay(10);
}
val = CMDVAL(map_read(map, map->pfow_base + PFOW_COMMAND_DATA));
return val;
}
static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
{
map_word pfow_val[4];
/* Check identification string */
pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
pfow_val[2] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_O);
pfow_val[3] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_W);
if (!map_word_equal(map, CMD('P'), pfow_val[0]))
goto out;
if (!map_word_equal(map, CMD('F'), pfow_val[1]))
goto out;
if (!map_word_equal(map, CMD('O'), pfow_val[2]))
goto out;
if (!map_word_equal(map, CMD('W'), pfow_val[3]))
goto out;
return 1; /* "PFOW" is found */
out:
printk(KERN_WARNING "%s: PFOW string at 0x%lx not found\n",
map->name, map->pfow_base);
return 0;
}
static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr)
{
lpddr->qinfo = kzalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
if (!lpddr->qinfo)
return 0;
/* Get the ManuID */
lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID));
/* Get the DeviceID */
lpddr->DevId = CMDVAL(map_read(map, map->pfow_base + PFOW_DEVICE_ID));
/* read parameters from chip qinfo table */
lpddr->qinfo->DevSizeShift = lpddr_info_query(map, "DevSizeShift");
lpddr->qinfo->TotalBlocksNum = lpddr_info_query(map, "TotalBlocksNum");
lpddr->qinfo->BufSizeShift = lpddr_info_query(map, "BufSizeShift");
lpddr->qinfo->HWPartsNum = lpddr_info_query(map, "HWPartsNum");
lpddr->qinfo->UniformBlockSizeShift =
lpddr_info_query(map, "UniformBlockSizeShift");
lpddr->qinfo->SuspEraseSupp = lpddr_info_query(map, "SuspEraseSupp");
lpddr->qinfo->SingleWordProgTime =
lpddr_info_query(map, "SingleWordProgTime");
lpddr->qinfo->ProgBufferTime = lpddr_info_query(map, "ProgBufferTime");
lpddr->qinfo->BlockEraseTime = lpddr_info_query(map, "BlockEraseTime");
return 1;
}
static struct lpddr_private *lpddr_probe_chip(struct map_info *map)
{
struct lpddr_private lpddr;
struct lpddr_private *retlpddr;
int numvirtchips;
if ((map->pfow_base + 0x1000) >= map->size) {
printk(KERN_NOTICE "%s: probe at base (0x%08lx) past the end of "
"the map (0x%08lx)\n", map->name,
(unsigned long)map->pfow_base, map->size - 1);
return NULL;
}
memset(&lpddr, 0, sizeof(struct lpddr_private));
if (!lpddr_pfow_present(map, &lpddr))
return NULL;
if (!lpddr_chip_setup(map, &lpddr))
return NULL;
/* Ok so we found a chip */
lpddr.chipshift = lpddr.qinfo->DevSizeShift;
lpddr.numchips = 1;
numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum;
retlpddr = kzalloc(struct_size(retlpddr, chips, numvirtchips),
GFP_KERNEL);
if (!retlpddr)
return NULL;
memcpy(retlpddr, &lpddr, sizeof(struct lpddr_private));
retlpddr->numchips = numvirtchips;
retlpddr->chipshift = retlpddr->qinfo->DevSizeShift -
__ffs(retlpddr->qinfo->HWPartsNum);
return retlpddr;
}
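/*
 * For example (a sketch, assuming a power-of-two HWPartsNum as the use of
 * __ffs() implies): a 64 MiB die (DevSizeShift == 26) with four hardware
 * partitions is exposed as four virtual chips of 2^(26 - 2) == 16 MiB each.
 */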
struct mtd_info *lpddr_probe(struct map_info *map)
{
struct mtd_info *mtd = NULL;
struct lpddr_private *lpddr;
/* First probe the map to see if we can open PFOW here */
lpddr = lpddr_probe_chip(map);
if (!lpddr)
return NULL;
map->fldrv_priv = lpddr;
mtd = lpddr_cmdset(map);
if (mtd) {
if (mtd->size > map->size) {
printk(KERN_WARNING "Reducing visibility of %ldKiB chip "
"to %ldKiB\n", (unsigned long)mtd->size >> 10,
(unsigned long)map->size >> 10);
mtd->size = map->size;
}
return mtd;
}
kfree(lpddr->qinfo);
kfree(lpddr);
map->fldrv_priv = NULL;
return NULL;
}
static struct mtd_chip_driver lpddr_chipdrv = {
.probe = lpddr_probe,
.name = "qinfo_probe",
.module = THIS_MODULE
};
static int __init lpddr_probe_init(void)
{
register_mtd_chip_driver(&lpddr_chipdrv);
return 0;
}
static void __exit lpddr_probe_exit(void)
{
unregister_mtd_chip_driver(&lpddr_chipdrv);
}
module_init(lpddr_probe_init);
module_exit(lpddr_probe_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vasiliy Leonenko <[email protected]>");
MODULE_DESCRIPTION("Driver to probe qinfo flash chips");
| linux-master | drivers/mtd/lpddr/qinfo_probe.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Common Flash Interface support:
* Generic utility functions not dependent on command set
*
* Copyright (C) 2002 Red Hat
* Copyright (C) 2003 STMicroelectronics Limited
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
void cfi_udelay(int us)
{
if (us >= 1000) {
msleep(DIV_ROUND_UP(us, 1000));
} else {
udelay(us);
cond_resched();
}
}
EXPORT_SYMBOL(cfi_udelay);
/*
* Returns the command address according to the given geometry.
*/
uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
struct map_info *map, struct cfi_private *cfi)
{
unsigned bankwidth = map_bankwidth(map);
unsigned interleave = cfi_interleave(cfi);
unsigned type = cfi->device_type;
uint32_t addr;
addr = (cmd_ofs * type) * interleave;
/* Modify the unlock address if we are in compatibility mode.
* For 16bit devices on 8 bit busses
* and 32bit devices on 16 bit busses
* set the low bit of the alternating bit sequence of the address.
*/
if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
addr |= (type >> 1)*interleave;
return addr;
}
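/*
 * Worked example (a sketch): for an x16 device on an 8-bit bus
 * (type == 2, interleave == 1, bankwidth == 1), unlock offset 0x555
 * maps to 0x555 * 2 == 0xaaa, while offset 0x2aa maps to
 * (0x2aa * 2) | 1 == 0x555, giving the familiar byte-mode unlock pair.
 */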
EXPORT_SYMBOL(cfi_build_cmd_addr);
/*
* Transforms the CFI command for the given geometry (bus width & interleave).
* It looks too long to be inline, but in the common case it should almost all
* get optimised away.
*/
map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
{
map_word val = { {0} };
int wordwidth, words_per_bus, chip_mode, chips_per_word;
unsigned long onecmd;
int i;
/* We do it this way to give the compiler a fighting chance
of optimising away all the crap for 'bankwidth' larger than
an unsigned long, in the common case where that support is
disabled */
if (map_bankwidth_is_large(map)) {
wordwidth = sizeof(unsigned long);
words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
} else {
wordwidth = map_bankwidth(map);
words_per_bus = 1;
}
chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
/* First, determine what the bit-pattern should be for a single
device, according to chip mode and endianness... */
switch (chip_mode) {
default: BUG();
case 1:
onecmd = cmd;
break;
case 2:
onecmd = cpu_to_cfi16(map, cmd);
break;
case 4:
onecmd = cpu_to_cfi32(map, cmd);
break;
}
/* Now replicate it across the size of an unsigned long, or
just to the bus width as appropriate */
switch (chips_per_word) {
default: BUG();
#if BITS_PER_LONG >= 64
case 8:
onecmd |= (onecmd << (chip_mode * 32));
fallthrough;
#endif
case 4:
onecmd |= (onecmd << (chip_mode * 16));
fallthrough;
case 2:
onecmd |= (onecmd << (chip_mode * 8));
fallthrough;
case 1:
;
}
/* And finally, for the multi-word case, replicate it
in all words in the structure */
for (i=0; i < words_per_bus; i++) {
val.x[i] = onecmd;
}
return val;
}
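/*
 * For example (a sketch): two interleaved x8 chips on a 16-bit bus
 * (bankwidth 2, interleave 2, chip_mode 1, chips_per_word 2) replicate
 * command 0xF0 into 0xF0F0, while a single x16 chip (chip_mode 2,
 * chips_per_word 1) simply yields cpu_to_cfi16(map, 0xF0).
 */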
EXPORT_SYMBOL(cfi_build_cmd);
unsigned long cfi_merge_status(map_word val, struct map_info *map,
struct cfi_private *cfi)
{
int wordwidth, words_per_bus, chip_mode, chips_per_word;
unsigned long onestat, res = 0;
int i;
/* We do it this way to give the compiler a fighting chance
of optimising away all the crap for 'bankwidth' larger than
an unsigned long, in the common case where that support is
disabled */
if (map_bankwidth_is_large(map)) {
wordwidth = sizeof(unsigned long);
words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
} else {
wordwidth = map_bankwidth(map);
words_per_bus = 1;
}
chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
onestat = val.x[0];
/* Or all status words together */
for (i=1; i < words_per_bus; i++) {
onestat |= val.x[i];
}
res = onestat;
switch(chips_per_word) {
default: BUG();
#if BITS_PER_LONG >= 64
case 8:
res |= (onestat >> (chip_mode * 32));
fallthrough;
#endif
case 4:
res |= (onestat >> (chip_mode * 16));
fallthrough;
case 2:
res |= (onestat >> (chip_mode * 8));
fallthrough;
case 1:
;
}
/* Last, convert the merged status back to CPU byte order for a
single device, according to chip mode and endianness... */
switch (chip_mode) {
case 1:
break;
case 2:
res = cfi16_to_cpu(map, res);
break;
case 4:
res = cfi32_to_cpu(map, res);
break;
default: BUG();
}
return res;
}
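/*
 * This is the inverse of cfi_build_cmd(): for the two interleaved x8
 * chips sketched above (chips_per_word 2, chip_mode 1), the merged status
 * is res = onestat | (onestat >> 8), so a bit set by either chip survives.
 */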
EXPORT_SYMBOL(cfi_merge_status);
/*
* Sends a CFI command to a bank of flash for the given geometry.
*
* Returns the offset in flash where the command was written.
* If prev_val is non-null, it will be set to the value at the command address,
* before the command was written.
*/
uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
struct map_info *map, struct cfi_private *cfi,
int type, map_word *prev_val)
{
map_word val;
uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
val = cfi_build_cmd(cmd, map, cfi);
if (prev_val)
*prev_val = map_read(map, addr);
map_write(map, val, addr);
return addr - base;
}
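/*
 * Typical usage (a sketch matching the probe sequences below): the AMD
 * unlock cycle on a CFI geometry is issued as
 *
 *   cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
 *   cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
 */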
EXPORT_SYMBOL(cfi_send_gen_cmd);
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
struct cfi_private *cfi)
{
int osf = cfi->interleave * cfi->device_type; /* scale factor */
map_word val[3];
map_word qry[3];
qry[0] = cfi_build_cmd('Q', map, cfi);
qry[1] = cfi_build_cmd('R', map, cfi);
qry[2] = cfi_build_cmd('Y', map, cfi);
val[0] = map_read(map, base + osf*0x10);
val[1] = map_read(map, base + osf*0x11);
val[2] = map_read(map, base + osf*0x12);
if (!map_word_equal(map, qry[0], val[0]))
return 0;
if (!map_word_equal(map, qry[1], val[1]))
return 0;
if (!map_word_equal(map, qry[2], val[2]))
return 0;
return 1; /* "QRY" found */
}
EXPORT_SYMBOL_GPL(cfi_qry_present);
int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
struct cfi_private *cfi)
{
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
if (cfi_qry_present(map, base, cfi))
return 1;
/* QRY not found; probably we are dealing with some odd CFI chips */
/* Some revisions of some old Intel chips? */
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
if (cfi_qry_present(map, base, cfi))
return 1;
/* ST M29DW chips */
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
if (cfi_qry_present(map, base, cfi))
return 1;
/* some old SST chips, e.g. 39VF160x/39VF320x */
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
if (cfi_qry_present(map, base, cfi))
return 1;
/* SST 39VF640xB */
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
if (cfi_qry_present(map, base, cfi))
return 1;
/* QRY not found */
return 0;
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_on);
void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
struct cfi_private *cfi)
{
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
/* M29W128G flashes require an additional reset command
when exiting QRY mode */
if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E))
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_off);
struct cfi_extquery *
__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
{
struct cfi_private *cfi = map->fldrv_priv;
__u32 base = 0; // cfi->chips[0].start;
int ofs_factor = cfi->interleave * cfi->device_type;
int i;
struct cfi_extquery *extp = NULL;
if (!adr)
goto out;
printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);
extp = kmalloc(size, GFP_KERNEL);
if (!extp)
goto out;
#ifdef CONFIG_MTD_XIP
local_irq_disable();
#endif
/* Switch it into Query Mode */
cfi_qry_mode_on(base, map, cfi);
/* Read in the Extended Query Table */
for (i=0; i<size; i++) {
((unsigned char *)extp)[i] =
cfi_read_query(map, base+((adr+i)*ofs_factor));
}
/* Make sure it returns to read mode */
cfi_qry_mode_off(base, map, cfi);
#ifdef CONFIG_MTD_XIP
(void) map_read(map, base);
xip_iprefetch();
local_irq_enable();
#endif
out: return extp;
}
EXPORT_SYMBOL(cfi_read_pri);
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_fixup *f;
for (f=fixups; f->fixup; f++) {
if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
f->fixup(mtd);
}
}
}
EXPORT_SYMBOL(cfi_fixup);
int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
loff_t ofs, size_t len, void *thunk)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long adr;
int chipnum, ret = 0;
int i, first;
struct mtd_erase_region_info *regions = mtd->eraseregions;
/* Check that both start and end of the requested erase are
* aligned with the erasesize at the appropriate addresses.
*/
i = 0;
/* Skip all erase regions which are ended before the start of
the requested erase. Actually, to save on the calculations,
we skip to the first erase region which starts after the
start of the requested erase, and then go back one.
*/
while (i < mtd->numeraseregions && ofs >= regions[i].offset)
i++;
i--;
/* OK, now i is pointing at the erase region in which this
erase request starts. Check the start of the requested
erase range is aligned with the erase size which is in
effect here.
*/
if (ofs & (regions[i].erasesize-1))
return -EINVAL;
/* Remember the erase region we start on */
first = i;
/* Next, check that the end of the requested erase is aligned
* with the erase region at that address.
*/
while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
i++;
/* As before, drop back one to point at the region in which
the address actually falls
*/
i--;
if ((ofs + len) & (regions[i].erasesize-1))
return -EINVAL;
chipnum = ofs >> cfi->chipshift;
adr = ofs - (chipnum << cfi->chipshift);
i=first;
while(len) {
int size = regions[i].erasesize;
ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);
if (ret)
return ret;
adr += size;
ofs += size;
len -= size;
if (ofs == regions[i].offset + size * regions[i].numblocks)
i++;
if (adr >> cfi->chipshift) {
adr = 0;
chipnum++;
if (chipnum >= cfi->numchips)
break;
}
}
return 0;
}
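/*
 * Example walk (a sketch): on a bottom-boot chip with 8 x 8 KiB blocks
 * followed by 64 KiB blocks, an erase of 0x0..0x20000 steps through the
 * first region in 8 KiB increments, advances 'i' at offset 0x10000, and
 * finishes with one 64 KiB step.
 */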
EXPORT_SYMBOL(cfi_varsize_frob);
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/chips/cfi_util.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Common code to handle map devices which are simple ROM
* (C) 2000 Red Hat.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
static int maprom_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static void maprom_nop (struct mtd_info *);
static struct mtd_info *map_rom_probe(struct map_info *map);
static int maprom_erase (struct mtd_info *mtd, struct erase_info *info);
static int maprom_point (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys);
static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
static struct mtd_chip_driver maprom_chipdrv = {
.probe = map_rom_probe,
.name = "map_rom",
.module = THIS_MODULE
};
static unsigned int default_erasesize(struct map_info *map)
{
const __be32 *erase_size = NULL;
erase_size = of_get_property(map->device_node, "erase-size", NULL);
return !erase_size ? map->size : be32_to_cpu(*erase_size);
}
static struct mtd_info *map_rom_probe(struct map_info *map)
{
struct mtd_info *mtd;
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
if (!mtd)
return NULL;
map->fldrv = &maprom_chipdrv;
mtd->priv = map;
mtd->name = map->name;
mtd->type = MTD_ROM;
mtd->size = map->size;
mtd->_point = maprom_point;
mtd->_unpoint = maprom_unpoint;
mtd->_read = maprom_read;
mtd->_write = maprom_write;
mtd->_sync = maprom_nop;
mtd->_erase = maprom_erase;
mtd->flags = MTD_CAP_ROM;
mtd->erasesize = default_erasesize(map);
mtd->writesize = 1;
mtd->writebufsize = 1;
__module_get(THIS_MODULE);
return mtd;
}
static int maprom_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
if (!map->virt)
return -EINVAL;
*virt = map->virt + from;
if (phys)
*phys = map->phys + from;
*retlen = len;
return 0;
}
static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
return 0;
}
static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
struct map_info *map = mtd->priv;
map_copy_from(map, buf, from, len);
*retlen = len;
return 0;
}
static void maprom_nop(struct mtd_info *mtd)
{
/* Nothing to see here */
}
static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
return -EROFS;
}
static int maprom_erase (struct mtd_info *mtd, struct erase_info *info)
{
/* We do our best 8) */
return -EROFS;
}
static int __init map_rom_init(void)
{
register_mtd_chip_driver(&maprom_chipdrv);
return 0;
}
static void __exit map_rom_exit(void)
{
unregister_mtd_chip_driver(&maprom_chipdrv);
}
module_init(map_rom_init);
module_exit(map_rom_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <[email protected]>");
MODULE_DESCRIPTION("MTD chip driver for ROM chips");
| linux-master | drivers/mtd/chips/map_rom.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Registration for chip drivers
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
static DEFINE_SPINLOCK(chip_drvs_lock);
static LIST_HEAD(chip_drvs_list);
void register_mtd_chip_driver(struct mtd_chip_driver *drv)
{
spin_lock(&chip_drvs_lock);
list_add(&drv->list, &chip_drvs_list);
spin_unlock(&chip_drvs_lock);
}
void unregister_mtd_chip_driver(struct mtd_chip_driver *drv)
{
spin_lock(&chip_drvs_lock);
list_del(&drv->list);
spin_unlock(&chip_drvs_lock);
}
static struct mtd_chip_driver *get_mtd_chip_driver (const char *name)
{
struct mtd_chip_driver *ret = NULL, *this;
spin_lock(&chip_drvs_lock);
list_for_each_entry(this, &chip_drvs_list, list) {
if (!strcmp(this->name, name)) {
ret = this;
break;
}
}
if (ret && !try_module_get(ret->module))
ret = NULL;
spin_unlock(&chip_drvs_lock);
return ret;
}
/* Hide all the horrid details, like some silly person taking
get_module_symbol() away from us, from the caller. */
struct mtd_info *do_map_probe(const char *name, struct map_info *map)
{
struct mtd_chip_driver *drv;
struct mtd_info *ret;
drv = get_mtd_chip_driver(name);
if (!drv && !request_module("%s", name))
drv = get_mtd_chip_driver(name);
if (!drv)
return NULL;
ret = drv->probe(map);
/* We decrease the use count here. It may have been a
probe-only module, which is no longer required from this
point, having given us a handle on (and increased the use
count of) the actual driver code.
*/
module_put(drv->module);
return ret;
}
/*
* Destroy an MTD device which was created for a map device.
* Make sure the MTD device is already unregistered before calling this
*/
void map_destroy(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
if (map->fldrv->destroy)
map->fldrv->destroy(mtd);
module_put(map->fldrv->module);
kfree(mtd);
}
EXPORT_SYMBOL(register_mtd_chip_driver);
EXPORT_SYMBOL(unregister_mtd_chip_driver);
EXPORT_SYMBOL(do_map_probe);
EXPORT_SYMBOL(map_destroy);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <[email protected]>");
MODULE_DESCRIPTION("Core routines for registering and invoking MTD chip drivers");
| linux-master | drivers/mtd/chips/chipreg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Common Flash Interface support:
* AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
*
* Copyright (C) 2000 Crossnet Co. <[email protected]>
* Copyright (C) 2004 Arcom Control Systems Ltd <[email protected]>
* Copyright (C) 2005 MontaVista Software Inc. <[email protected]>
*
* 2_by_8 routines added by Simon Munton
*
* 4_by_16 work by Carolyn J. Smith
*
* XIP support hooks by Vitaly Wool (based on code for Intel flash
* by Nicolas Pitre)
*
* 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
*
* Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>
#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0
#define MAX_RETRIES 3
#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
#define AT49BV6416 0x00d6
#define S29GL064N_MN12 0x0c01
/*
* Status Register bit description. Used by flash devices that don't
* support DQ polling (e.g. HyperFlash)
*/
#define CFI_SR_DRB BIT(7)
#define CFI_SR_ESB BIT(5)
#define CFI_SR_PSB BIT(4)
#define CFI_SR_WBASB BIT(3)
#define CFI_SR_SLSB BIT(1)
enum cfi_quirks {
CFI_QUIRK_DQ_TRUE_DATA = BIT(0),
};
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#if !FORCE_WORD_WRITE
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#endif
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
size_t *, struct otp_info *);
static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
size_t *, struct otp_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
size_t *, u_char *);
static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
size_t *, u_char *);
static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
size_t *, const u_char *);
static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);
static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf);
static void cfi_amdstd_destroy(struct mtd_info *);
struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
.probe = NULL, /* Not usable directly */
.destroy = cfi_amdstd_destroy,
.name = "cfi_cmdset_0002",
.module = THIS_MODULE
};
/*
* Use status register to poll for Erase/write completion when DQ is not
* supported. This is indicated by Bit[1:0] of SoftwareFeatures field in
* CFI Primary Vendor-Specific Extended Query table 1.5
*/
static int cfi_use_status_reg(struct cfi_private *cfi)
{
struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
return extp && extp->MinorVersion >= '5' &&
(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
}
static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status;
if (!cfi_use_status_reg(cfi))
return 0;
cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
status = map_read(map, adr);
/* The error bits are invalid while the chip's busy */
if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
return 0;
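/* 0x3a == CFI_SR_ESB | CFI_SR_PSB | CFI_SR_WBASB | CFI_SR_SLSB */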
if (map_word_bitsset(map, status, CMD(0x3a))) {
unsigned long chipstatus = MERGESTATUS(status);
if (chipstatus & CFI_SR_ESB)
pr_err("%s erase operation failed, status %lx\n",
map->name, chipstatus);
if (chipstatus & CFI_SR_PSB)
pr_err("%s program operation failed, status %lx\n",
map->name, chipstatus);
if (chipstatus & CFI_SR_WBASB)
pr_err("%s buffer program command aborted, status %lx\n",
map->name, chipstatus);
if (chipstatus & CFI_SR_SLSB)
pr_err("%s sector write protected, status %lx\n",
map->name, chipstatus);
/* Erase/Program status bits are set on the operation failure */
if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB))
return 1;
}
return 0;
}
/* #define DEBUG_CFI_FEATURES */
#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
const char* erase_suspend[3] = {
"Not supported", "Read only", "Read/write"
};
const char* top_bottom[6] = {
"No WP", "8x8KiB sectors at top & bottom, no WP",
"Bottom boot", "Top boot",
"Uniform, Bottom WP", "Uniform, Top WP"
};
printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
printk(" Address sensitive unlock: %s\n",
(extp->SiliconRevision & 1) ? "Not required" : "Required");
if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
else
printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
if (extp->BlkProt == 0)
printk(" Block protection: Not supported\n");
else
printk(" Block protection: %d sectors per group\n", extp->BlkProt);
printk(" Temporary block unprotect: %s\n",
extp->TmpBlkUnprotect ? "Supported" : "Not supported");
printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
printk(" Burst mode: %s\n",
extp->BurstMode ? "Supported" : "Not supported");
if (extp->PageMode == 0)
printk(" Page mode: Not supported\n");
else
printk(" Page mode: %d word page\n", extp->PageMode << 2);
printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
extp->VppMin >> 4, extp->VppMin & 0xf);
printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
extp->VppMax >> 4, extp->VppMax & 0xf);
if (extp->TopBottom < ARRAY_SIZE(top_bottom))
printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
else
printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif
#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
__u8 major = extp->MajorVersion;
__u8 minor = extp->MinorVersion;
if (((major << 8) | minor) < 0x3131) {
/* CFI version 1.0 => don't trust bootloc */
pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
map->name, cfi->mfr, cfi->id);
/* AFAICS all 29LV400 with a bottom boot block have a device ID
* of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
* These were badly detected as they have the 0x80 bit set
* so treat them as a special case.
*/
if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&
/* Macronix added CFI to their 2nd generation
* MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
* Fujitsu, Spansion, EON, ESI and older Macronix)
* has CFI.
*
* Therefore also check the manufacturer.
* This reduces the risk of false detection due to
* the 8-bit device ID.
*/
(cfi->mfr == CFI_MFR_MACRONIX)) {
pr_debug("%s: Macronix MX29LV400C with bottom boot block"
" detected\n", map->name);
extp->TopBottom = 2; /* bottom boot */
} else
if (cfi->id & 0x80) {
printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
extp->TopBottom = 3; /* top boot */
} else {
extp->TopBottom = 2; /* bottom boot */
}
pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
" deduced %s from Device ID\n", map->name, major, minor,
extp->TopBottom == 2 ? "bottom" : "top");
}
}
#endif
#if !FORCE_WORD_WRITE
static void fixup_use_write_buffers(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x2201)
return;
if (cfi->cfiq->BufWriteTimeoutTyp) {
pr_debug("Using buffer write method\n");
mtd->_write = cfi_amdstd_write_buffers;
}
}
#endif /* !FORCE_WORD_WRITE */
/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
struct cfi_pri_atmel atmel_pri;
memcpy(&atmel_pri, extp, sizeof(atmel_pri));
memset((char *)extp + 5, 0, sizeof(*extp) - 5);
if (atmel_pri.Features & 0x02)
extp->EraseSuspend = 2;
/* Some chips got it backwards... */
if (cfi->id == AT49BV6416) {
if (atmel_pri.BottomBoot)
extp->TopBottom = 3;
else
extp->TopBottom = 2;
} else {
if (atmel_pri.BottomBoot)
extp->TopBottom = 2;
else
extp->TopBottom = 3;
}
/* burst write mode not supported */
cfi->cfiq->BufWriteTimeoutTyp = 0;
cfi->cfiq->BufWriteTimeoutMax = 0;
}
static void fixup_use_secsi(struct mtd_info *mtd)
{
/* Setup for chips with a secsi area */
mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}
static void fixup_use_erase_chip(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
if ((cfi->cfiq->NumEraseRegions == 1) &&
((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
mtd->_erase = cfi_amdstd_erase_chip;
}
}
/*
* Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
* locked by default.
*/
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
mtd->_lock = cfi_atmel_lock;
mtd->_unlock = cfi_atmel_unlock;
mtd->flags |= MTD_POWERUP_LOCK;
}
static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
/*
* These flashes report two separate eraseblock regions based on the
* sector_erase-size and block_erase-size, although they both operate on the
* same memory. This is not allowed according to CFI, so we just pick the
* sector_erase-size.
*/
cfi->cfiq->NumEraseRegions = 1;
}
static void fixup_sst39vf(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
fixup_old_sst_eraseregion(mtd);
cfi->addr_unlock1 = 0x5555;
cfi->addr_unlock2 = 0x2AAA;
}
static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
fixup_old_sst_eraseregion(mtd);
cfi->addr_unlock1 = 0x555;
cfi->addr_unlock2 = 0x2AA;
cfi->sector_erase_cmd = CMD(0x50);
}
static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
fixup_sst39vf_rev_b(mtd);
/*
* CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
* it should report a size of 8KBytes (0x0020*256).
*/
cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
mtd->name);
}
static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
mtd->name);
}
}
static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
mtd->name);
}
}
static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
/*
* S29NS512P flash uses more than 8bits to report number of sectors,
* which is not permitted by CFI.
*/
cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
mtd->name);
}
static void fixup_quirks(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
if (cfi->mfr == CFI_MFR_AMD && cfi->id == S29GL064N_MN12)
cfi->quirks |= CFI_QUIRK_DQ_TRUE_DATA;
}
/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
{ 0, 0, NULL }
};
static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
{ CFI_MFR_AMD, S29GL064N_MN12, fixup_s29gl064n_sectors },
{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_quirks },
{ 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
{ 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
/* The CFI vendor IDs and the JEDEC vendor IDs appear
* to be common. It is likely the device IDs are as
* well. This table picks out all cases where
* we know that is the case.
*/
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
{ 0, 0, NULL }
};
static void cfi_fixup_major_minor(struct cfi_private *cfi,
struct cfi_pri_amdstd *extp)
{
if (cfi->mfr == CFI_MFR_SAMSUNG) {
if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
(extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
/*
* Samsung K8P2815UQB and K8D6x16UxM chips
* report major=0 / minor=0.
* K8D3x16UxC chips report major=3 / minor=3.
*/
printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu"
" Extended Query version to 1.%c\n",
extp->MinorVersion);
extp->MajorVersion = '1';
}
}
/*
* SST 38VF640x chips report major=0xFF / minor=0xFF.
*/
if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
extp->MajorVersion = '1';
extp->MinorVersion = '0';
}
}
static int is_m29ew(struct cfi_private *cfi)
{
if (cfi->mfr == CFI_MFR_INTEL &&
((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
(cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
return 1;
return 0;
}
/*
* From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
* Some revisions of the M29EW suffer from erase suspend hang ups. In
* particular, it can occur when the sequence
* Erase Confirm -> Suspend -> Program -> Resume
* causes a lockup due to internal timing issues. The consequence is that the
* erase cannot be resumed without inserting a dummy command after programming
* and prior to resuming. [...] The work-around is to issue a dummy write cycle
* that writes an F0 command code before the RESUME command.
*/
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
if (is_m29ew(cfi))
map_write(map, CMD(0xF0), adr);
}
/*
* From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
*
* Some revisions of the M29EW (for example, A1 and A2 step revisions)
* are affected by a problem that could cause a hang up when an ERASE SUSPEND
* command is issued after an ERASE RESUME operation without waiting for a
* minimum delay. The result is that once the ERASE seems to be completed
* (no bits are toggling), the contents of the Flash memory block on which
* the erase was ongoing could be inconsistent with the expected values
* (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
* values), causing a consequent failure of the ERASE operation.
* The occurrence of this issue could be high, especially when file system
* operations on the Flash are intensive. As a result, it is recommended
* that a patch be applied. Intensive file system operations can cause many
* calls to the garbage routine to free Flash space (also by erasing physical
* Flash blocks) and as a result, many consecutive SUSPEND and RESUME
* commands can occur. The problem disappears when a delay is inserted after
* the RESUME command by using the udelay() function available in Linux.
* The DELAY value must be tuned based on the customer's platform.
* The maximum value that fixes the problem in all cases is 500us.
* But, in our experience, a delay of 30 µs to 50 µs is sufficient
* in most cases.
* We have chosen 500µs because this latency is acceptable.
*/
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
/*
* Resolving the Delay After Resume Issue see Micron TN-13-07
* Worst case delay must be 500µs but 30-50µs should be ok as well
*/
if (is_m29ew(cfi))
cfi_udelay(500);
}
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
struct cfi_private *cfi = map->fldrv_priv;
struct device_node __maybe_unused *np = map->device_node;
struct mtd_info *mtd;
int i;
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
if (!mtd)
return NULL;
mtd->priv = map;
mtd->type = MTD_NORFLASH;
/* Fill in the default mtd operations */
mtd->_erase = cfi_amdstd_erase_varsize;
mtd->_write = cfi_amdstd_write_words;
mtd->_read = cfi_amdstd_read;
mtd->_sync = cfi_amdstd_sync;
mtd->_suspend = cfi_amdstd_suspend;
mtd->_resume = cfi_amdstd_resume;
mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
mtd->writesize = 1;
mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
pr_debug("MTD %s(): write buffer size %d\n", __func__,
mtd->writebufsize);
mtd->_panic_write = cfi_amdstd_panic_write;
mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
if (cfi->cfi_mode==CFI_MODE_CFI){
unsigned char bootloc;
__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
struct cfi_pri_amdstd *extp;
extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
if (extp) {
/*
* It's a real CFI chip, not one for which the probe
* routine faked a CFI structure.
*/
cfi_fixup_major_minor(cfi, extp);
/*
* Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
* see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
* http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
* http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
* http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
*/
if (extp->MajorVersion != '1' ||
(extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
"version %c.%c (%#02x/%#02x).\n",
extp->MajorVersion, extp->MinorVersion,
extp->MajorVersion, extp->MinorVersion);
kfree(extp);
kfree(mtd);
return NULL;
}
printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n",
extp->MajorVersion, extp->MinorVersion);
/* Install our own private info structure */
cfi->cmdset_priv = extp;
/* Apply cfi device specific fixups */
cfi_fixup(mtd, cfi_fixup_table);
#ifdef DEBUG_CFI_FEATURES
/* Tell the user about it in lots of lovely detail */
cfi_tell_features(extp);
#endif
#ifdef CONFIG_OF
if (np && of_property_read_bool(
np, "use-advanced-sector-protection")
&& extp->BlkProtUnprot == 8) {
printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n");
mtd->_lock = cfi_ppb_lock;
mtd->_unlock = cfi_ppb_unlock;
mtd->_is_locked = cfi_ppb_is_locked;
}
#endif
bootloc = extp->TopBottom;
if ((bootloc < 2) || (bootloc > 5)) {
printk(KERN_WARNING "%s: CFI contains unrecognised boot "
"bank location (%d). Assuming bottom.\n",
map->name, bootloc);
bootloc = 2;
}
if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);
for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
int j = (cfi->cfiq->NumEraseRegions-1)-i;
swap(cfi->cfiq->EraseRegionInfo[i],
cfi->cfiq->EraseRegionInfo[j]);
}
}
/* Set the default CFI lock/unlock addresses */
cfi->addr_unlock1 = 0x555;
cfi->addr_unlock2 = 0x2aa;
}
cfi_fixup(mtd, cfi_nopri_fixup_table);
if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
kfree(mtd);
return NULL;
}
} /* CFI mode */
else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
/* Apply jedec specific fixups */
cfi_fixup(mtd, jedec_fixup_table);
}
/* Apply generic fixups */
cfi_fixup(mtd, fixup_table);
for (i=0; i< cfi->numchips; i++) {
cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
/*
* First calculate the timeout max according to the timeout field
* of struct cfi_ident probed from the chip's CFI area, if
* available. Specify a minimum of 2000us, in case the CFI data
* is wrong.
*/
if (cfi->cfiq->BufWriteTimeoutTyp &&
cfi->cfiq->BufWriteTimeoutMax)
cfi->chips[i].buffer_write_time_max =
1 << (cfi->cfiq->BufWriteTimeoutTyp +
cfi->cfiq->BufWriteTimeoutMax);
else
cfi->chips[i].buffer_write_time_max = 0;
cfi->chips[i].buffer_write_time_max =
max(cfi->chips[i].buffer_write_time_max, 2000);
cfi->chips[i].ref_point_counter = 0;
init_waitqueue_head(&(cfi->chips[i].wq));
}
map->fldrv = &cfi_amdstd_chipdrv;
return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
unsigned long offset = 0;
int i,j;
printk(KERN_NOTICE "number of %s chips: %d\n",
(cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
/* Select the correct geometry setup */
mtd->size = devsize * cfi->numchips;
mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
sizeof(struct mtd_erase_region_info),
GFP_KERNEL);
if (!mtd->eraseregions)
goto setup_err;
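/*
* CFI packs each erase region into one 32-bit word: bits 0-15 hold the
* number of blocks minus one, bits 16-31 hold the block size divided by
* 256. Thus (info >> 8) & ~0xff recovers the block size in bytes; e.g.
* 0x0100003F describes 64 blocks of 64 KiB. devsize above is likewise
* just 1 << DevSize bytes per chip, scaled by the interleave.
*/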
for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
unsigned long ernum, ersize;
ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
if (mtd->erasesize < ersize) {
mtd->erasesize = ersize;
}
for (j=0; j<cfi->numchips; j++) {
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
}
offset += (ersize * ernum);
}
if (offset != devsize) {
/* Argh */
printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
goto setup_err;
}
__module_get(THIS_MODULE);
register_reboot_notifier(&mtd->reboot_notifier);
return mtd;
setup_err:
kfree(mtd->eraseregions);
kfree(mtd);
kfree(cfi->cmdset_priv);
return NULL;
}
/*
* Return true if the chip is ready and, if an expected value was given,
* holds that value.
*
* Ready is one of: read mode, query mode, erase-suspend-read mode (in any
* non-suspended sector) and is indicated by no toggle bits toggling.
*
* Errors are indicated by toggling bits, or by bits held at the wrong
* value.
*
* Note that anything more complicated than checking if no bits are toggling
* (including checking DQ5 for an error status) is tricky to get working
* correctly and is therefore not done (particularly with interleaved chips
* as each chip must be checked independently of the others).
*/
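/*
* For example, under the AMD toggle-bit convention DQ6 flips on every read
* while a program or erase is in progress, so two successive reads of the
* same address differ; once the operation completes, successive reads match
* and the data lines carry true array data.
*/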
static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
unsigned long addr, map_word *expected)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word oldd, curd;
int ret;
if (cfi_use_status_reg(cfi)) {
map_word ready = CMD(CFI_SR_DRB);
/*
* For chips that support status register, check device
* ready bit
*/
cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
curd = map_read(map, addr);
return map_word_andequal(map, curd, ready, ready);
}
oldd = map_read(map, addr);
curd = map_read(map, addr);
ret = map_word_equal(map, oldd, curd);
if (!ret || !expected)
return ret;
return map_word_equal(map, curd, *expected);
}
static int __xipram chip_good(struct map_info *map, struct flchip *chip,
unsigned long addr, map_word *expected)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word *datum = expected;
if (cfi->quirks & CFI_QUIRK_DQ_TRUE_DATA)
datum = NULL;
return chip_ready(map, chip, addr, datum);
}
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
DECLARE_WAITQUEUE(wait, current);
struct cfi_private *cfi = map->fldrv_priv;
unsigned long timeo;
struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
resettime:
timeo = jiffies + HZ;
retry:
switch (chip->state) {
case FL_STATUS:
for (;;) {
if (chip_ready(map, chip, adr, NULL))
break;
if (time_after(jiffies, timeo)) {
printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
return -EIO;
}
mutex_unlock(&chip->mutex);
cfi_udelay(1);
mutex_lock(&chip->mutex);
/* Someone else might have been playing with it. */
goto retry;
}
return 0;
case FL_READY:
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
return 0;
case FL_ERASING:
if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
!(mode == FL_READY || mode == FL_POINT ||
(mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
goto sleep;
/* Do not allow suspend if reading/writing the erase-block (EB) address */
if ((adr & chip->in_progress_block_mask) ==
chip->in_progress_block_addr)
goto sleep;
/* Erase suspend */
/* It's harmless to issue the Erase-Suspend and Erase-Resume
* commands when the erase algorithm isn't in progress. */
map_write(map, CMD(0xB0), chip->in_progress_block_addr);
chip->oldstate = FL_ERASING;
chip->state = FL_ERASE_SUSPENDING;
chip->erase_suspended = 1;
for (;;) {
if (chip_ready(map, chip, adr, NULL))
break;
if (time_after(jiffies, timeo)) {
/* Should have suspended the erase by now.
* Send an Erase-Resume command as either
* there was an error (so leave the erase
* routine to recover from it) or we are trying to
* use the erase-in-progress sector. */
put_chip(map, chip, adr);
printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
return -EIO;
}
mutex_unlock(&chip->mutex);
cfi_udelay(1);
mutex_lock(&chip->mutex);
/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
So we can just loop here. */
}
chip->state = FL_READY;
return 0;
case FL_XIP_WHILE_ERASING:
if (mode != FL_READY && mode != FL_POINT &&
(!cfip || !(cfip->EraseSuspend&2)))
goto sleep;
chip->oldstate = chip->state;
chip->state = FL_READY;
return 0;
case FL_SHUTDOWN:
/* The machine is rebooting */
return -EIO;
case FL_POINT:
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
return 0;
fallthrough;
default:
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
goto resettime;
}
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
switch(chip->oldstate) {
case FL_ERASING:
cfi_fixup_m29ew_erase_suspend(map,
chip->in_progress_block_addr);
map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
cfi_fixup_m29ew_delay_after_resume(cfi);
chip->oldstate = FL_READY;
chip->state = FL_ERASING;
break;
case FL_XIP_WHILE_ERASING:
chip->state = chip->oldstate;
chip->oldstate = FL_READY;
break;
case FL_READY:
case FL_STATUS:
break;
default:
printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
}
wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP
/*
* No interrupt whatsoever can be serviced while the flash isn't in array
* mode. This is ensured by the xip_disable() and xip_enable() functions
* enclosing any code path where the flash is known not to be in array mode.
* And within a XIP disabled code path, only functions marked with __xipram
* may be called and nothing else (it's a good thing to inspect generated
* assembly to make sure inline functions were actually inlined and that gcc
* didn't emit calls to its own support functions). Configuring MTD CFI
* support for a single buswidth and a single interleave is also recommended.
*/
static void xip_disable(struct map_info *map, struct flchip *chip,
unsigned long adr)
{
/* TODO: chips not used for XIP should simply return here */
(void) map_read(map, adr); /* ensure mmu mapping is up to date */
local_irq_disable();
}
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
if (chip->state != FL_POINT && chip->state != FL_READY) {
map_write(map, CMD(0xf0), adr);
chip->state = FL_READY;
}
(void) map_read(map, adr);
xip_iprefetch();
local_irq_enable();
}
/*
* When a delay is required for the flash operation to complete, the
* xip_udelay() function is polling for both the given timeout and pending
* (but still masked) hardware interrupts. Whenever there is an interrupt
* pending then the flash erase operation is suspended, array mode restored
* and interrupts unmasked. Task scheduling might also happen at that
* point. The CPU eventually returns from the interrupt or the call to
* schedule() and the suspended flash operation is resumed for the remainder
* of the delay period.
*
* Warning: this function _will_ fool interrupt latency tracing tools.
*/
static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
unsigned long adr, int usec)
{
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
map_word status, OK = CMD(0x80);
unsigned long suspended, start = xip_currtime();
flstate_t oldstate;
do {
cpu_relax();
if (xip_irqpending() && extp &&
((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
(cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
/*
* Let's suspend the erase operation when supported.
* Note that we currently don't try to suspend
* interleaved chips if there is already another
* operation suspended (imagine what happens
* when one chip was already done with the current
* operation while another chip suspended it, then
* we resume the whole thing at once). Yes, it
* can happen!
*/
map_write(map, CMD(0xb0), adr);
usec -= xip_elapsed_since(start);
suspended = xip_currtime();
do {
if (xip_elapsed_since(suspended) > 100000) {
/*
* The chip doesn't want to suspend
* after waiting for 100 msecs.
* This is a critical error but there
* is not much we can do here.
*/
return;
}
status = map_read(map, adr);
} while (!map_word_andequal(map, status, OK, OK));
/* Suspend succeeded */
oldstate = chip->state;
if (!map_word_bitsset(map, status, CMD(0x40)))
break;
chip->state = FL_XIP_WHILE_ERASING;
chip->erase_suspended = 1;
map_write(map, CMD(0xf0), adr);
(void) map_read(map, adr);
xip_iprefetch();
local_irq_enable();
mutex_unlock(&chip->mutex);
xip_iprefetch();
cond_resched();
/*
* We're back. However someone else might have
* decided to go write to the chip if we are in
* a suspended erase state. If so let's wait
* until it's done.
*/
mutex_lock(&chip->mutex);
while (chip->state != FL_XIP_WHILE_ERASING) {
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
}
/* Disallow XIP again */
local_irq_disable();
/* Correct Erase Suspend Hangups for M29EW */
cfi_fixup_m29ew_erase_suspend(map, adr);
/* Resume the write or erase operation */
map_write(map, cfi->sector_erase_cmd, adr);
chip->state = oldstate;
start = xip_currtime();
} else if (usec >= 1000000/HZ) {
/*
* Try to save on CPU power when waiting delay
* is at least a system timer tick period.
* No need to be extremely accurate here.
*/
xip_cpu_idle();
}
status = map_read(map, adr);
} while (!map_word_andequal(map, status, OK, OK)
&& xip_elapsed_since(start) < usec);
}
#define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
/*
* The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
* the flash is actively programming or erasing since we have to poll for
* the operation to complete anyway. We can't do that in a generic way with
* a XIP setup so do it before the actual flash operation in this case
* and stub it out from INVALIDATE_CACHE_UDELAY.
*/
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
INVALIDATE_CACHED_RANGE(map, from, size)
#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
UDELAY(map, chip, adr, usec)
/*
* Extra notes:
*
* Activating this XIP support changes the way the code works a bit. For
* example the code to suspend the current process when concurrent access
* happens is never executed because xip_udelay() will always return with the
* same chip state as it was entered with. This is why there is no concern
* about the add_wait_queue() or schedule() calls from within a couple of
* xip_disable()'d areas of code, like in do_erase_oneblock for example.
* The queueing and scheduling are always happening within xip_udelay().
*
* Similarly, get_chip() and put_chip() just happen to always be executed
* with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. while the
* flash is in array mode, so most of the cases therein are never executed
* and no problem is caused with XIP.
*/
#else
#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define UDELAY(map, chip, adr, usec) \
do { \
mutex_unlock(&chip->mutex); \
cfi_udelay(usec); \
mutex_lock(&chip->mutex); \
} while (0)
#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
do { \
mutex_unlock(&chip->mutex); \
INVALIDATE_CACHED_RANGE(map, adr, len); \
cfi_udelay(usec); \
mutex_lock(&chip->mutex); \
} while (0)
#endif
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
unsigned long cmd_addr;
struct cfi_private *cfi = map->fldrv_priv;
int ret;
adr += chip->start;
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_READY);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
if (chip->state != FL_POINT && chip->state != FL_READY) {
map_write(map, CMD(0xf0), cmd_addr);
chip->state = FL_READY;
}
map_copy_from(map, buf, adr, len);
put_chip(map, chip, cmd_addr);
mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs;
int chipnum;
int ret = 0;
/* ofs: offset within the first chip that the first read should start */
chipnum = (from >> cfi->chipshift);
ofs = from - (chipnum << cfi->chipshift);
while (len) {
unsigned long thislen;
if (chipnum >= cfi->numchips)
break;
if ((len + ofs -1) >> cfi->chipshift)
thislen = (1<<cfi->chipshift) - ofs;
else
thislen = len;
ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
if (ret)
break;
*retlen += thislen;
len -= thislen;
buf += thislen;
ofs = 0;
chipnum++;
}
return ret;
}
typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
loff_t adr, size_t len, u_char *buf, size_t grouplen);
static inline void otp_enter(struct map_info *map, struct flchip *chip,
loff_t adr, size_t len)
{
struct cfi_private *cfi = map->fldrv_priv;
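/*
* AMD command-set SecSi (OTP) sector entry: the two standard unlock cycles
* followed by command 0x88 map the OTP region over the normal array.
*/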
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}
static inline void otp_exit(struct map_info *map, struct flchip *chip,
loff_t adr, size_t len)
{
struct cfi_private *cfi = map->fldrv_priv;
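/*
* SecSi sector exit: unlock cycles followed by 0x90 and then 0x00 return
* the chip to normal array read mode.
*/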
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}
static inline int do_read_secsi_onechip(struct map_info *map,
struct flchip *chip, loff_t adr,
size_t len, u_char *buf,
size_t grouplen)
{
DECLARE_WAITQUEUE(wait, current);
retry:
mutex_lock(&chip->mutex);
if (chip->state != FL_READY){
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
goto retry;
}
adr += chip->start;
chip->state = FL_READY;
otp_enter(map, chip, adr, len);
map_copy_from(map, buf, adr, len);
otp_exit(map, chip, adr, len);
wake_up(&chip->wq);
mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs;
int chipnum;
int ret = 0;
/* ofs: offset within the first chip that the first read should start */
/* 8 secsi bytes per chip */
chipnum = from >> 3;
ofs = from & 7;
while (len) {
unsigned long thislen;
if (chipnum >= cfi->numchips)
break;
if ((len + ofs -1) >> 3)
thislen = (1<<3) - ofs;
else
thislen = len;
ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
thislen, buf, 0);
if (ret)
break;
*retlen += thislen;
len -= thislen;
buf += thislen;
ofs = 0;
chipnum++;
}
return ret;
}
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
unsigned long adr, map_word datum,
int mode);
static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
size_t len, u_char *buf, size_t grouplen)
{
int ret;
while (len) {
unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
int gap = adr - bus_ofs;
int n = min_t(int, len, map_bankwidth(map) - gap);
map_word datum = map_word_ff(map);
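/*
* Worked example: with a 4-byte bankwidth, adr = ...0x02 and len = 1 give
* bus_ofs = ...0x00, gap = 2 and n = 1, so the old word is read back below
* and only one byte of it is replaced before programming.
*/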
if (n != map_bankwidth(map)) {
/* partial write of a word, load old contents */
otp_enter(map, chip, bus_ofs, map_bankwidth(map));
datum = map_read(map, bus_ofs);
otp_exit(map, chip, bus_ofs, map_bankwidth(map));
}
datum = map_word_load_partial(map, datum, buf, gap, n);
ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
if (ret)
return ret;
adr += n;
buf += n;
len -= n;
}
return 0;
}
static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
size_t len, u_char *buf, size_t grouplen)
{
struct cfi_private *cfi = map->fldrv_priv;
uint8_t lockreg;
unsigned long timeo;
int ret;
/* make sure area matches group boundaries */
if ((adr != 0) || (len != grouplen))
return -EINVAL;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_LOCKING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
chip->state = FL_LOCKING;
/* Enter lock register command */
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
/* read lock register */
lockreg = cfi_read_query(map, 0);
/* clear bit 0 to protect the extended memory block */
lockreg &= ~0x01;
/* write lock register */
map_write(map, CMD(0xA0), chip->start);
map_write(map, CMD(lockreg), chip->start);
/* wait for chip to become ready */
timeo = jiffies + msecs_to_jiffies(2);
for (;;) {
if (chip_ready(map, chip, adr, NULL))
break;
if (time_after(jiffies, timeo)) {
pr_err("Waiting for chip to be ready timed out.\n");
ret = -EIO;
break;
}
UDELAY(map, chip, 0, 1);
}
/* exit protection commands */
map_write(map, CMD(0x90), chip->start);
map_write(map, CMD(0x00), chip->start);
chip->state = FL_READY;
put_chip(map, chip, chip->start);
mutex_unlock(&chip->mutex);
return ret;
}
static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf,
otp_op_t action, int user_regs)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int ofs_factor = cfi->interleave * cfi->device_type;
unsigned long base;
int chipnum;
struct flchip *chip;
uint8_t otp, lockreg;
int ret;
size_t user_size, factory_size, otpsize;
loff_t user_offset, factory_offset, otpoffset;
int user_locked = 0, otplocked;
*retlen = 0;
for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
chip = &cfi->chips[chipnum];
factory_size = 0;
user_size = 0;
/* Micron M29EW family */
if (is_m29ew(cfi)) {
base = chip->start;
/* check whether secsi area is factory locked
or user lockable */
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, base, FL_CFI_QUERY);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
cfi_qry_mode_on(base, map, cfi);
otp = cfi_read_query(map, base + 0x3 * ofs_factor);
cfi_qry_mode_off(base, map, cfi);
put_chip(map, chip, base);
mutex_unlock(&chip->mutex);
if (otp & 0x80) {
/* factory locked */
factory_offset = 0;
factory_size = 0x100;
} else {
/* customer lockable */
user_offset = 0;
user_size = 0x100;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, base, FL_LOCKING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
/* Enter lock register command */
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
chip->start, map, cfi,
cfi->device_type, NULL);
/* read lock register */
lockreg = cfi_read_query(map, 0);
/* exit protection commands */
map_write(map, CMD(0x90), chip->start);
map_write(map, CMD(0x00), chip->start);
put_chip(map, chip, chip->start);
mutex_unlock(&chip->mutex);
user_locked = ((lockreg & 0x01) == 0x00);
}
}
otpsize = user_regs ? user_size : factory_size;
if (!otpsize)
continue;
otpoffset = user_regs ? user_offset : factory_offset;
otplocked = user_regs ? user_locked : 1;
if (!action) {
/* return otpinfo */
struct otp_info *otpinfo;
/* len is unsigned, so check before subtracting to avoid underflow */
if (len < sizeof(*otpinfo))
return -ENOSPC;
len -= sizeof(*otpinfo);
otpinfo = (struct otp_info *)buf;
otpinfo->start = from;
otpinfo->length = otpsize;
otpinfo->locked = otplocked;
buf += sizeof(*otpinfo);
*retlen += sizeof(*otpinfo);
from += otpsize;
} else if ((from < otpsize) && (len > 0)) {
size_t size;
size = (len < otpsize - from) ? len : otpsize - from;
ret = action(map, chip, otpoffset + from, size, buf,
otpsize);
if (ret < 0)
return ret;
buf += size;
len -= size;
*retlen += size;
from = 0;
} else {
from -= otpsize;
}
}
return 0;
}
static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
size_t *retlen, struct otp_info *buf)
{
return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
NULL, 0);
}
static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
size_t *retlen, struct otp_info *buf)
{
return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
NULL, 1);
}
static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen,
u_char *buf)
{
return cfi_amdstd_otp_walk(mtd, from, len, retlen,
buf, do_read_secsi_onechip, 0);
}
static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen,
u_char *buf)
{
return cfi_amdstd_otp_walk(mtd, from, len, retlen,
buf, do_read_secsi_onechip, 1);
}
static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen,
const u_char *buf)
{
return cfi_amdstd_otp_walk(mtd, from, len, retlen, (u_char *)buf,
do_otp_write, 1);
}
static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len)
{
size_t retlen;
return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
do_otp_lock, 1);
}
static int __xipram do_write_oneword_once(struct map_info *map,
struct flchip *chip,
unsigned long adr, map_word datum,
int mode, struct cfi_private *cfi)
{
unsigned long timeo;
/*
* We use a 1ms + 1 jiffies generic timeout for writes (most devices
* have a max write time of a few hundred usec). However, we should
* use the maximum timeout value given by the chip at probe time
* instead. Unfortunately, struct flchip does not have a field for the
* maximum timeout, only for the typical one, which can be far too
* short depending on the conditions. The ' + 1' is to avoid having a
* timeout of 0 jiffies if HZ is smaller than 1000.
*/
unsigned long uWriteTimeout = (HZ / 1000) + 1;
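/* e.g. HZ=1000 gives 1+1 = 2 jiffies (2ms); HZ=250 gives 0+1 = 1 jiffy (4ms) */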
int ret = 0;
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
map_write(map, datum, adr);
chip->state = mode;
INVALIDATE_CACHE_UDELAY(map, chip,
adr, map_bankwidth(map),
chip->word_write_time);
/* See comment above for timeout value. */
timeo = jiffies + uWriteTimeout;
for (;;) {
if (chip->state != mode) {
/* Someone's suspended the write. Sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
mutex_lock(&chip->mutex);
continue;
}
/*
* We check "time_after" and "!chip_good" before checking
* "chip_good" to avoid the failure due to scheduling.
*/
if (time_after(jiffies, timeo) &&
!chip_good(map, chip, adr, &datum)) {
xip_enable(map, chip, adr);
printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
xip_disable(map, chip, adr);
ret = -EIO;
break;
}
if (chip_good(map, chip, adr, &datum)) {
if (cfi_check_err_status(map, chip, adr))
ret = -EIO;
break;
}
/* Latency issues. Drop the lock, wait a while and retry */
UDELAY(map, chip, adr, 1);
}
return ret;
}
static int __xipram do_write_oneword_start(struct map_info *map,
struct flchip *chip,
unsigned long adr, int mode)
{
int ret;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, mode);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
if (mode == FL_OTP_WRITE)
otp_enter(map, chip, adr, map_bankwidth(map));
return ret;
}
static void __xipram do_write_oneword_done(struct map_info *map,
struct flchip *chip,
unsigned long adr, int mode)
{
if (mode == FL_OTP_WRITE)
otp_exit(map, chip, adr, map_bankwidth(map));
chip->state = FL_READY;
DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
}
static int __xipram do_write_oneword_retry(struct map_info *map,
struct flchip *chip,
unsigned long adr, map_word datum,
int mode)
{
struct cfi_private *cfi = map->fldrv_priv;
int ret = 0;
map_word oldd;
int retry_cnt = 0;
/*
* Check for a NOP for the case when the datum to write is already
* present - it saves time and works around buggy chips that corrupt
* data at other locations when 0xff is written to a location that
* already contains 0xff.
*/
oldd = map_read(map, adr);
if (map_word_equal(map, oldd, datum)) {
pr_debug("MTD %s(): NOP\n", __func__);
return ret;
}
XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
ENABLE_VPP(map);
xip_disable(map, chip, adr);
retry:
ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
if (ret) {
/* reset on all failures. */
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
if (++retry_cnt <= MAX_RETRIES) {
ret = 0;
goto retry;
}
}
xip_enable(map, chip, adr);
return ret;
}
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
unsigned long adr, map_word datum,
int mode)
{
int ret;
adr += chip->start;
pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr,
datum.x[0]);
ret = do_write_oneword_start(map, chip, adr, mode);
if (ret)
return ret;
ret = do_write_oneword_retry(map, chip, adr, datum, mode);
do_write_oneword_done(map, chip, adr, mode);
return ret;
}
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int ret;
int chipnum;
unsigned long ofs, chipstart;
DECLARE_WAITQUEUE(wait, current);
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
chipstart = cfi->chips[chipnum].start;
/* If it's not bus-aligned, do the first byte write */
if (ofs & (map_bankwidth(map)-1)) {
unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
int i = ofs - bus_ofs;
int n = 0;
map_word tmp_buf;
retry:
mutex_lock(&cfi->chips[chipnum].mutex);
if (cfi->chips[chipnum].state != FL_READY) {
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&cfi->chips[chipnum].wq, &wait);
mutex_unlock(&cfi->chips[chipnum].mutex);
schedule();
remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
goto retry;
}
/* Load 'tmp_buf' with old contents of flash */
tmp_buf = map_read(map, bus_ofs+chipstart);
mutex_unlock(&cfi->chips[chipnum].mutex);
/* Number of bytes to copy from buffer */
n = min_t(int, len, map_bankwidth(map)-i);
tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
ret = do_write_oneword(map, &cfi->chips[chipnum],
bus_ofs, tmp_buf, FL_WRITING);
if (ret)
return ret;
ofs += n;
buf += n;
(*retlen) += n;
len -= n;
if (ofs >> cfi->chipshift) {
chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
}
/* We are now aligned, write as much as possible */
while(len >= map_bankwidth(map)) {
map_word datum;
datum = map_word_load(map, buf);
ret = do_write_oneword(map, &cfi->chips[chipnum],
ofs, datum, FL_WRITING);
if (ret)
return ret;
ofs += map_bankwidth(map);
buf += map_bankwidth(map);
(*retlen) += map_bankwidth(map);
len -= map_bankwidth(map);
if (ofs >> cfi->chipshift) {
chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
chipstart = cfi->chips[chipnum].start;
}
}
/* Write the trailing bytes if any */
if (len & (map_bankwidth(map)-1)) {
map_word tmp_buf;
retry1:
mutex_lock(&cfi->chips[chipnum].mutex);
if (cfi->chips[chipnum].state != FL_READY) {
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&cfi->chips[chipnum].wq, &wait);
mutex_unlock(&cfi->chips[chipnum].mutex);
schedule();
remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
goto retry1;
}
tmp_buf = map_read(map, ofs + chipstart);
mutex_unlock(&cfi->chips[chipnum].mutex);
tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
ret = do_write_oneword(map, &cfi->chips[chipnum],
ofs, tmp_buf, FL_WRITING);
if (ret)
return ret;
(*retlen) += len;
}
return 0;
}
#if !FORCE_WORD_WRITE
static int __xipram do_write_buffer_wait(struct map_info *map,
struct flchip *chip, unsigned long adr,
map_word datum)
{
unsigned long timeo;
unsigned long u_write_timeout;
int ret = 0;
/*
* Timeout is calculated according to CFI data, if available.
* See more comments in cfi_cmdset_0002().
*/
u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max);
timeo = jiffies + u_write_timeout;
for (;;) {
if (chip->state != FL_WRITING) {
/* Someone's suspended the write. Sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
mutex_lock(&chip->mutex);
continue;
}
/*
* We check "time_after" and "!chip_good" before checking
* "chip_good" to avoid the failure due to scheduling.
*/
if (time_after(jiffies, timeo) &&
!chip_good(map, chip, adr, &datum)) {
pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
__func__, adr);
ret = -EIO;
break;
}
if (chip_good(map, chip, adr, &datum)) {
if (cfi_check_err_status(map, chip, adr))
ret = -EIO;
break;
}
/* Latency issues. Drop the lock, wait a while and retry */
UDELAY(map, chip, adr, 1);
}
return ret;
}
static void __xipram do_write_buffer_reset(struct map_info *map,
struct flchip *chip,
struct cfi_private *cfi)
{
/*
* Recovery from write-buffer programming failures requires
* the write-to-buffer-reset sequence. Since the last part
* of the sequence also works as a normal reset, we can run
* the same commands regardless of why we are here.
* See e.g.
* http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
*/
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
/* FIXME - should have reset delay before continuing */
}
/*
* FIXME: interleaved mode not tested, and probably not supported!
*/
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
unsigned long adr, const u_char *buf,
int len)
{
struct cfi_private *cfi = map->fldrv_priv;
int ret;
unsigned long cmd_adr;
int z, words;
map_word datum;
adr += chip->start;
cmd_adr = adr;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
datum = map_word_load(map, buf);
pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
__func__, adr, datum.x[0]);
XIP_INVAL_CACHED_RANGE(map, adr, len);
ENABLE_VPP(map);
xip_disable(map, chip, cmd_adr);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
/* Write Buffer Load */
map_write(map, CMD(0x25), cmd_adr);
chip->state = FL_WRITING_TO_BUFFER;
/* Write length of data to come */
words = len / map_bankwidth(map);
map_write(map, CMD(words - 1), cmd_adr);
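/* e.g. a 32-byte transfer over a 2-byte bus: words = 16, so 0x000F is written */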
/* Write data */
z = 0;
while(z < words * map_bankwidth(map)) {
datum = map_word_load(map, buf);
map_write(map, datum, adr + z);
z += map_bankwidth(map);
buf += map_bankwidth(map);
}
z -= map_bankwidth(map);
adr += z;
/* Write Buffer Program Confirm: GO GO GO */
map_write(map, CMD(0x29), cmd_adr);
chip->state = FL_WRITING;
INVALIDATE_CACHE_UDELAY(map, chip,
adr, map_bankwidth(map),
chip->word_write_time);
ret = do_write_buffer_wait(map, chip, adr, datum);
if (ret)
do_write_buffer_reset(map, chip, cfi);
xip_enable(map, chip, adr);
chip->state = FL_READY;
DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
int ret;
int chipnum;
unsigned long ofs;
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
/* If it's not bus-aligned, do the first word write */
if (ofs & (map_bankwidth(map)-1)) {
size_t local_len = (-ofs)&(map_bankwidth(map)-1);
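/*
* (-ofs) & (bankwidth-1) is the byte count up to the next aligned
* boundary; e.g. with bankwidth 4 and ofs ending in 0x3, local_len = 1.
*/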
if (local_len > len)
local_len = len;
ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
local_len, retlen, buf);
if (ret)
return ret;
ofs += local_len;
buf += local_len;
len -= local_len;
if (ofs >> cfi->chipshift) {
chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
}
/* Write buffer is worth it only if more than one word to write... */
while (len >= map_bankwidth(map) * 2) {
/* We must not cross write block boundaries */
int size = wbufsize - (ofs & (wbufsize-1));
if (size > len)
size = len;
if (size % map_bankwidth(map))
size -= size % map_bankwidth(map);
ret = do_write_buffer(map, &cfi->chips[chipnum],
ofs, buf, size);
if (ret)
return ret;
ofs += size;
buf += size;
(*retlen) += size;
len -= size;
if (ofs >> cfi->chipshift) {
chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
}
if (len) {
size_t retlen_dregs = 0;
ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
len, &retlen_dregs, buf);
*retlen += retlen_dregs;
return ret;
}
return 0;
}
#endif /* !FORCE_WORD_WRITE */
/*
* Wait for the flash chip to become ready to write data
*
* This is only called during the panic_write() path. When panic_write()
* is called, the kernel is in the process of a panic, and will soon be
* dead. Therefore we don't take any locks, and attempt to get access
* to the chip as soon as possible.
*/
static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
int retries = 10;
int i;
/*
* If the driver thinks the chip is idle, and no toggle bits
* are changing, then the chip is actually idle for sure.
*/
if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL))
return 0;
/*
* Try several times to reset the chip and then wait for it
* to become idle. The upper limit of a few milliseconds of
* delay isn't a big problem: the kernel is dying anyway. It
* is more important to save the messages.
*/
while (retries > 0) {
const unsigned long timeo = (HZ / 1000) + 1;
/* send the reset command */
map_write(map, CMD(0xF0), chip->start);
/* wait for the chip to become ready */
for (i = 0; i < jiffies_to_usecs(timeo); i++) {
if (chip_ready(map, chip, adr, NULL))
return 0;
udelay(1);
}
retries--;
}
/* the chip never became ready */
return -EBUSY;
}
/*
* Write out one word of data to a single flash chip during a kernel panic
*
* This is only called during the panic_write() path. When panic_write()
* is called, the kernel is in the process of a panic, and will soon be
* dead. Therefore we don't take any locks, and attempt to get access
* to the chip as soon as possible.
*
* The implementation of this routine is intentionally similar to
* do_write_oneword(), in order to ease code maintenance.
*/
static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
unsigned long adr, map_word datum)
{
const unsigned long uWriteTimeout = (HZ / 1000) + 1;
struct cfi_private *cfi = map->fldrv_priv;
int retry_cnt = 0;
map_word oldd;
int ret;
int i;
adr += chip->start;
ret = cfi_amdstd_panic_wait(map, chip, adr);
if (ret)
return ret;
pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
__func__, adr, datum.x[0]);
/*
* Check for a NOP for the case when the datum to write is already
* present - it saves time and works around buggy chips that corrupt
* data at other locations when 0xff is written to a location that
* already contains 0xff.
*/
oldd = map_read(map, adr);
if (map_word_equal(map, oldd, datum)) {
pr_debug("MTD %s(): NOP\n", __func__);
goto op_done;
}
ENABLE_VPP(map);
retry:
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
map_write(map, datum, adr);
for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
if (chip_ready(map, chip, adr, NULL))
break;
udelay(1);
}
if (!chip_ready(map, chip, adr, &datum) ||
cfi_check_err_status(map, chip, adr)) {
/* reset on all failures. */
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
if (++retry_cnt <= MAX_RETRIES)
goto retry;
ret = -EIO;
}
op_done:
DISABLE_VPP(map);
return ret;
}
/*
* Write out some data during a kernel panic
*
* This is used by the mtdoops driver to save the dying messages from a
* kernel which has panic'd.
*
* This routine ignores all of the locking used throughout the rest of the
* driver, in order to ensure that the data gets written out no matter what
* state this driver (and the flash chip itself) was in when the kernel crashed.
*
* The implementation of this routine is intentionally similar to
* cfi_amdstd_write_words(), in order to ease code maintenance.
*/
static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs, chipstart;
int ret;
int chipnum;
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
chipstart = cfi->chips[chipnum].start;
/* If it's not bus aligned, do the first byte write */
if (ofs & (map_bankwidth(map) - 1)) {
unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
int i = ofs - bus_ofs;
int n = 0;
map_word tmp_buf;
ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
if (ret)
return ret;
/* Load 'tmp_buf' with old contents of flash */
tmp_buf = map_read(map, bus_ofs + chipstart);
/* Number of bytes to copy from buffer */
n = min_t(int, len, map_bankwidth(map) - i);
tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
bus_ofs, tmp_buf);
if (ret)
return ret;
ofs += n;
buf += n;
(*retlen) += n;
len -= n;
if (ofs >> cfi->chipshift) {
chipnum++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
}
/* We are now aligned, write as much as possible */
while (len >= map_bankwidth(map)) {
map_word datum;
datum = map_word_load(map, buf);
ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
ofs, datum);
if (ret)
return ret;
ofs += map_bankwidth(map);
buf += map_bankwidth(map);
(*retlen) += map_bankwidth(map);
len -= map_bankwidth(map);
if (ofs >> cfi->chipshift) {
chipnum++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
chipstart = cfi->chips[chipnum].start;
}
}
/* Write the trailing bytes if any */
if (len & (map_bankwidth(map) - 1)) {
map_word tmp_buf;
ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
if (ret)
return ret;
tmp_buf = map_read(map, ofs + chipstart);
tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
ofs, tmp_buf);
if (ret)
return ret;
(*retlen) += len;
}
return 0;
}
/*
* Handle devices with one erase region, that only implement
* the chip erase command.
*/
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
struct cfi_private *cfi = map->fldrv_priv;
unsigned long timeo = jiffies + HZ;
unsigned long int adr;
DECLARE_WAITQUEUE(wait, current);
int ret;
int retry_cnt = 0;
map_word datum = map_word_ff(map);
adr = cfi->addr_unlock1;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_ERASING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
pr_debug("MTD %s(): ERASE 0x%.8lx\n",
__func__, chip->start);
XIP_INVAL_CACHED_RANGE(map, adr, map->size);
ENABLE_VPP(map);
xip_disable(map, chip, adr);
retry:
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
chip->state = FL_ERASING;
chip->erase_suspended = 0;
chip->in_progress_block_addr = adr;
chip->in_progress_block_mask = ~(map->size - 1);
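/*
* Whole-chip erase: the mask spans the entire device, so every address
* matches the in-progress block in get_chip()'s erase-suspend check.
*/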
INVALIDATE_CACHE_UDELAY(map, chip,
adr, map->size,
chip->erase_time*500);
timeo = jiffies + (HZ*20);
for (;;) {
if (chip->state != FL_ERASING) {
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
continue;
}
if (chip->erase_suspended) {
/* This erase was suspended and resumed.
Adjust the timeout */
timeo = jiffies + (HZ*20); /* FIXME */
chip->erase_suspended = 0;
}
if (chip_ready(map, chip, adr, &datum)) {
if (cfi_check_err_status(map, chip, adr))
ret = -EIO;
break;
}
if (time_after(jiffies, timeo)) {
printk(KERN_WARNING "MTD %s(): software timeout\n",
__func__);
ret = -EIO;
break;
}
/* Latency issues. Drop the lock, wait a while and retry */
UDELAY(map, chip, adr, 1000000/HZ);
}
/* Did we succeed? */
if (ret) {
/* reset on all failures. */
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
if (++retry_cnt <= MAX_RETRIES) {
ret = 0;
goto retry;
}
}
chip->state = FL_READY;
xip_enable(map, chip, adr);
DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
unsigned long timeo = jiffies + HZ;
DECLARE_WAITQUEUE(wait, current);
int ret;
int retry_cnt = 0;
map_word datum = map_word_ff(map);
adr += chip->start;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_ERASING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
pr_debug("MTD %s(): ERASE 0x%.8lx\n",
__func__, adr);
XIP_INVAL_CACHED_RANGE(map, adr, len);
ENABLE_VPP(map);
xip_disable(map, chip, adr);
retry:
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
map_write(map, cfi->sector_erase_cmd, adr);
chip->state = FL_ERASING;
chip->erase_suspended = 0;
chip->in_progress_block_addr = adr;
chip->in_progress_block_mask = ~(len - 1);
INVALIDATE_CACHE_UDELAY(map, chip,
adr, len,
chip->erase_time*500);
timeo = jiffies + (HZ*20);
for (;;) {
if (chip->state != FL_ERASING) {
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
continue;
}
if (chip->erase_suspended) {
/* This erase was suspended and resumed.
Adjust the timeout */
timeo = jiffies + (HZ*20); /* FIXME */
chip->erase_suspended = 0;
}
if (chip_ready(map, chip, adr, &datum)) {
if (cfi_check_err_status(map, chip, adr))
ret = -EIO;
break;
}
if (time_after(jiffies, timeo)) {
printk(KERN_WARNING "MTD %s(): software timeout\n",
__func__);
ret = -EIO;
break;
}
/* Latency issues. Drop the lock, wait a while and retry */
UDELAY(map, chip, adr, 1000000/HZ);
}
/* Did we succeed? */
if (ret) {
/* reset on all failures. */
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
if (++retry_cnt <= MAX_RETRIES) {
ret = 0;
goto retry;
}
}
chip->state = FL_READY;
xip_enable(map, chip, adr);
DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
instr->len, NULL);
}
static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
if (instr->addr != 0)
return -EINVAL;
if (instr->len != mtd->size)
return -EINVAL;
return do_erase_chip(map, &cfi->chips[0]);
}
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
unsigned long adr, int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
int ret;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
if (ret)
goto out_unlock;
chip->state = FL_LOCKING;
pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
cfi->device_type, NULL);
map_write(map, CMD(0x40), chip->start + adr);
chip->state = FL_READY;
put_chip(map, chip, adr + chip->start);
ret = 0;
out_unlock:
mutex_unlock(&chip->mutex);
return ret;
}
static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
unsigned long adr, int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
int ret;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
if (ret)
goto out_unlock;
chip->state = FL_UNLOCKING;
pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
map_write(map, CMD(0x70), adr);
chip->state = FL_READY;
put_chip(map, chip, adr + chip->start);
ret = 0;
out_unlock:
mutex_unlock(&chip->mutex);
return ret;
}
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
/*
* Advanced Sector Protection - PPB (Persistent Protection Bit) locking
*/
struct ppb_lock {
struct flchip *chip;
unsigned long adr;
int locked;
};
#define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2)
#define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3)
static int __maybe_unused do_ppb_xxlock(struct map_info *map,
struct flchip *chip,
unsigned long adr, int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
unsigned long timeo;
int ret;
adr += chip->start;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
cfi->device_type, NULL);
/* PPB entry command */
cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
chip->state = FL_LOCKING;
map_write(map, CMD(0xA0), adr);
map_write(map, CMD(0x00), adr);
} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
/*
* Unlocking of one specific sector is not supported, so we
* have to unlock all sectors of this device instead
*/
chip->state = FL_UNLOCKING;
map_write(map, CMD(0x80), chip->start);
map_write(map, CMD(0x30), chip->start);
} else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
chip->state = FL_JEDEC_QUERY;
/* Chip reports 0->locked, 1->unlocked; invert so ret is 1 when locked */
ret = !cfi_read_query(map, adr);
} else
BUG();
/*
* Wait for some time as unlocking of all sectors takes quite long
*/
timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */
for (;;) {
if (chip_ready(map, chip, adr, NULL))
break;
if (time_after(jiffies, timeo)) {
printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
ret = -EIO;
break;
}
UDELAY(map, chip, adr, 1);
}
/* Exit BC commands */
map_write(map, CMD(0x90), chip->start);
map_write(map, CMD(0x00), chip->start);
chip->state = FL_READY;
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
uint64_t len)
{
return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
DO_XXLOCK_ONEBLOCK_LOCK);
}
static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
uint64_t len)
{
struct mtd_erase_region_info *regions = mtd->eraseregions;
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct ppb_lock *sect;
unsigned long adr;
loff_t offset;
uint64_t length;
int chipnum;
int i;
int sectors;
int ret;
int max_sectors;
/*
* PPB unlocking always unlocks all sectors of the flash chip.
* We need to re-lock all previously locked sectors. So let's
* first check the locking status of all sectors and save
* it for future use.
*/
max_sectors = 0;
for (i = 0; i < mtd->numeraseregions; i++)
max_sectors += regions[i].numblocks;
sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);
if (!sect)
return -ENOMEM;
/*
* This code to walk all sectors is a slightly modified version
* of the cfi_varsize_frob() code.
*/
i = 0;
chipnum = 0;
adr = 0;
sectors = 0;
offset = 0;
length = mtd->size;
while (length) {
int size = regions[i].erasesize;
/*
* Only test sectors that shall not be unlocked. The other
* sectors shall be unlocked, so let's keep their locking
* status at "unlocked" (locked=0) for the final re-locking.
*/
if ((offset < ofs) || (offset >= (ofs + len))) {
sect[sectors].chip = &cfi->chips[chipnum];
sect[sectors].adr = adr;
sect[sectors].locked = do_ppb_xxlock(
map, &cfi->chips[chipnum], adr, 0,
DO_XXLOCK_ONEBLOCK_GETLOCK);
}
adr += size;
offset += size;
length -= size;
if (offset == regions[i].offset + size * regions[i].numblocks)
i++;
if (adr >> cfi->chipshift) {
if (offset >= (ofs + len))
break;
adr = 0;
chipnum++;
if (chipnum >= cfi->numchips)
break;
}
sectors++;
if (sectors >= max_sectors) {
printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
max_sectors);
kfree(sect);
return -EINVAL;
}
}
/* Now unlock the whole chip */
ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
DO_XXLOCK_ONEBLOCK_UNLOCK);
if (ret) {
kfree(sect);
return ret;
}
/*
* PPB unlocking always unlocks all sectors of the flash chip.
* We need to re-lock all previously locked sectors.
*/
for (i = 0; i < sectors; i++) {
if (sect[i].locked)
do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
DO_XXLOCK_ONEBLOCK_LOCK);
}
kfree(sect);
return ret;
}
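/*
* Reports 1 if any block in the range is PPB-locked: cfi_varsize_frob()
* returns the first nonzero per-block result, and the GETLOCK op returns
* 1 for a locked block.
*/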
static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
uint64_t len)
{
return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
}
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int i;
struct flchip *chip;
int ret = 0;
DECLARE_WAITQUEUE(wait, current);
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
retry:
mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
case FL_STATUS:
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
chip->oldstate = chip->state;
chip->state = FL_SYNCING;
/* No need to wake_up() on this state change -
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
fallthrough;
case FL_SYNCING:
mutex_unlock(&chip->mutex);
break;
default:
/* Not an idle state */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
goto retry;
}
}
/* Unlock the chips again */
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
mutex_unlock(&chip->mutex);
}
}
static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int i;
struct flchip *chip;
int ret = 0;
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
case FL_STATUS:
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
chip->oldstate = chip->state;
chip->state = FL_PM_SUSPENDED;
/* No need to wake_up() on this state change -
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
break;
case FL_PM_SUSPENDED:
break;
default:
ret = -EAGAIN;
break;
}
mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
if (ret) {
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
mutex_unlock(&chip->mutex);
}
}
return ret;
}
static void cfi_amdstd_resume(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int i;
struct flchip *chip;
for (i=0; i<cfi->numchips; i++) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
chip->state = FL_READY;
map_write(map, CMD(0xF0), chip->start);
wake_up(&chip->wq);
}
else
printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
mutex_unlock(&chip->mutex);
}
}
/*
* Ensure that the flash device is put back into read array mode before
* unloading the driver or rebooting. On some systems, rebooting while
* the flash is in query/program/erase mode will prevent the CPU from
* fetching the bootloader code, requiring a hard reset or power cycle.
*/
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int i, ret;
struct flchip *chip;
for (i = 0; i < cfi->numchips; i++) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
if (!ret) {
map_write(map, CMD(0xF0), chip->start);
chip->state = FL_SHUTDOWN;
put_chip(map, chip, chip->start);
}
mutex_unlock(&chip->mutex);
}
return 0;
}
static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
void *v)
{
struct mtd_info *mtd;
mtd = container_of(nb, struct mtd_info, reboot_notifier);
cfi_amdstd_reset(mtd);
return NOTIFY_DONE;
}
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
cfi_amdstd_reset(mtd);
unregister_reboot_notifier(&mtd->reboot_notifier);
kfree(cfi->cmdset_priv);
kfree(cfi->cfiq);
kfree(cfi);
kfree(mtd->eraseregions);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <[email protected]> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");
| linux-master | drivers/mtd/chips/cfi_cmdset_0002.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Common code to handle map devices which are simple RAM
* (C) 2000 Red Hat.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
static int mapram_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int mapram_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int mapram_erase (struct mtd_info *, struct erase_info *);
static void mapram_nop (struct mtd_info *);
static struct mtd_info *map_ram_probe(struct map_info *map);
static int mapram_point (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys);
static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
static struct mtd_chip_driver mapram_chipdrv = {
.probe = map_ram_probe,
.name = "map_ram",
.module = THIS_MODULE
};
static struct mtd_info *map_ram_probe(struct map_info *map)
{
struct mtd_info *mtd;
/* Check the first byte is RAM */
#if 0
map_write8(map, 0x55, 0);
if (map_read8(map, 0) != 0x55)
return NULL;
map_write8(map, 0xAA, 0);
if (map_read8(map, 0) != 0xAA)
return NULL;
/* Check the last byte is RAM */
map_write8(map, 0x55, map->size-1);
if (map_read8(map, map->size-1) != 0x55)
return NULL;
map_write8(map, 0xAA, map->size-1);
if (map_read8(map, map->size-1) != 0xAA)
return NULL;
#endif
/* OK. It seems to be RAM. */
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
if (!mtd)
return NULL;
map->fldrv = &mapram_chipdrv;
mtd->priv = map;
mtd->name = map->name;
mtd->type = MTD_RAM;
mtd->size = map->size;
mtd->_erase = mapram_erase;
mtd->_read = mapram_read;
mtd->_write = mapram_write;
mtd->_panic_write = mapram_write;
mtd->_point = mapram_point;
mtd->_sync = mapram_nop;
mtd->_unpoint = mapram_unpoint;
mtd->flags = MTD_CAP_RAM;
mtd->writesize = 1;
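	/* Shrink erasesize to the largest power of two (at most
	 * PAGE_SIZE) that evenly divides the map size. */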
mtd->erasesize = PAGE_SIZE;
while(mtd->size & (mtd->erasesize - 1))
mtd->erasesize >>= 1;
__module_get(THIS_MODULE);
return mtd;
}
static int mapram_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
if (!map->virt)
return -EINVAL;
*virt = map->virt + from;
if (phys)
*phys = map->phys + from;
*retlen = len;
return 0;
}
static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
return 0;
}
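
/*
 * Illustrative sketch, compiled out: because this driver provides
 * _point/_unpoint, callers can access the RAM in place rather than
 * copying through mtd_read(). example_peek() is a made-up caller.
 */
#if 0
static void example_peek(struct mtd_info *mtd)
{
	size_t retlen;
	void *virt;

	if (!mtd_point(mtd, 0, 16, &retlen, &virt, NULL)) {
		/* 'virt' aliases the mapped RAM directly */
		print_hex_dump(KERN_DEBUG, "map_ram: ", DUMP_PREFIX_OFFSET,
			       16, 1, virt, retlen, false);
		mtd_unpoint(mtd, 0, retlen);
	}
}
#endif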
static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
struct map_info *map = mtd->priv;
map_copy_from(map, buf, from, len);
*retlen = len;
return 0;
}
static int mapram_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
struct map_info *map = mtd->priv;
map_copy_to(map, to, buf, len);
*retlen = len;
return 0;
}
static int mapram_erase (struct mtd_info *mtd, struct erase_info *instr)
{
/* Yeah, it's inefficient. Who cares? It's faster than a _real_
flash erase. */
struct map_info *map = mtd->priv;
map_word allff;
unsigned long i;
allff = map_word_ff(map);
for (i=0; i<instr->len; i += map_bankwidth(map))
map_write(map, allff, instr->addr + i);
return 0;
}
static void mapram_nop(struct mtd_info *mtd)
{
/* Nothing to see here */
}
static int __init map_ram_init(void)
{
register_mtd_chip_driver(&mapram_chipdrv);
return 0;
}
static void __exit map_ram_exit(void)
{
unregister_mtd_chip_driver(&mapram_chipdrv);
}
module_init(map_ram_init);
module_exit(map_ram_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <[email protected]>");
MODULE_DESCRIPTION("MTD chip driver for RAM chips");
| linux-master | drivers/mtd/chips/map_ram.c |
// SPDX-License-Identifier: GPL-2.0
/*
Common Flash Interface probe code.
(C) 2000 Red Hat.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/gen_probe.h>
//#define DEBUG_CFI
#ifdef DEBUG_CFI
static void print_cfi_ident(struct cfi_ident *);
#endif
static int cfi_probe_chip(struct map_info *map, __u32 base,
unsigned long *chip_map, struct cfi_private *cfi);
static int cfi_chip_setup(struct map_info *map, struct cfi_private *cfi);
struct mtd_info *cfi_probe(struct map_info *map);
#ifdef CONFIG_MTD_XIP
/* only needed for short periods, so this is rather simple */
#define xip_disable() local_irq_disable()
#define xip_allowed(base, map) \
do { \
(void) map_read(map, base); \
xip_iprefetch(); \
local_irq_enable(); \
} while (0)
#define xip_enable(base, map, cfi) \
do { \
cfi_qry_mode_off(base, map, cfi); \
xip_allowed(base, map); \
} while (0)
#define xip_disable_qry(base, map, cfi) \
do { \
xip_disable(); \
cfi_qry_mode_on(base, map, cfi); \
} while (0)
#else
#define xip_disable() do { } while (0)
#define xip_allowed(base, map) do { } while (0)
#define xip_enable(base, map, cfi) do { } while (0)
#define xip_disable_qry(base, map, cfi) do { } while (0)
#endif
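/*
 * Why the interrupt dance: with CONFIG_MTD_XIP the kernel may execute
 * directly from this flash, and a chip sitting in CFI query mode no
 * longer returns array data. Interrupts therefore stay off while the
 * chip is out of array mode, and xip_iprefetch() refills the prefetch
 * queue before normal execution from flash resumes.
 */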
/*
* This fixup occurs immediately after reading the CFI structure and can affect
* the number of chips detected, unlike cfi_fixup, which occurs after an
* mtd_info structure has been created for the chip.
*/
struct cfi_early_fixup {
uint16_t mfr;
uint16_t id;
void (*fixup)(struct cfi_private *cfi);
};
static void cfi_early_fixup(struct cfi_private *cfi,
const struct cfi_early_fixup *fixups)
{
const struct cfi_early_fixup *f;
for (f = fixups; f->fixup; f++) {
if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
f->fixup(cfi);
}
}
}
/* Check for QRY at the given base.
   in: interleave, type, mode
   ret: 1 if a new chip was found, 0 otherwise (alias, out of range,
   or no CFI response)
 */
static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
unsigned long *chip_map, struct cfi_private *cfi)
{
int i;
if ((base + 0) >= map->size) {
printk(KERN_NOTICE
"Probe at base[0x00](0x%08lx) past the end of the map(0x%08lx)\n",
		       (unsigned long)base, map->size - 1);
return 0;
}
if ((base + 0xff) >= map->size) {
printk(KERN_NOTICE
"Probe at base[0x55](0x%08lx) past the end of the map(0x%08lx)\n",
(unsigned long)base + 0x55, map->size -1);
return 0;
}
xip_disable();
if (!cfi_qry_mode_on(base, map, cfi)) {
xip_enable(base, map, cfi);
return 0;
}
if (!cfi->numchips) {
/* This is the first time we're called. Set up the CFI
stuff accordingly and return */
return cfi_chip_setup(map, cfi);
}
/* Check each previous chip to see if it's an alias */
for (i=0; i < (base >> cfi->chipshift); i++) {
unsigned long start;
if(!test_bit(i, chip_map)) {
/* Skip location; no valid chip at this address */
continue;
}
start = i << cfi->chipshift;
/* This chip should be in read mode if it's one
we've already touched. */
if (cfi_qry_present(map, start, cfi)) {
/* Eep. This chip also had the QRY marker.
* Is it an alias for the new one? */
cfi_qry_mode_off(start, map, cfi);
/* If the QRY marker goes away, it's an alias */
if (!cfi_qry_present(map, start, cfi)) {
xip_allowed(base, map);
printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
map->name, base, start);
return 0;
}
/* Yes, it's actually got QRY for data. Most
* unfortunate. Stick the new chip in read mode
* too and if it's the same, assume it's an alias. */
/* FIXME: Use other modes to do a proper check */
cfi_qry_mode_off(base, map, cfi);
if (cfi_qry_present(map, base, cfi)) {
xip_allowed(base, map);
printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
map->name, base, start);
return 0;
}
}
}
/* OK, if we got to here, then none of the previous chips appear to
be aliases for the current one. */
set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
cfi->numchips++;
/* Put it back into Read Mode */
cfi_qry_mode_off(base, map, cfi);
xip_allowed(base, map);
printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
map->name, cfi->interleave, cfi->device_type*8, base,
map->bankwidth*8);
return 1;
}
static void fixup_s70gl02gs_chips(struct cfi_private *cfi)
{
/*
* S70GL02GS flash reports a single 256 MiB chip, but is really made up
* of two 128 MiB chips with 1024 sectors each.
*/
cfi->cfiq->DevSize = 27;
cfi->cfiq->EraseRegionInfo[0] = 0x20003ff;
pr_warn("Bad S70GL02GS CFI data; adjust to detect 2 chips\n");
}
static const struct cfi_early_fixup cfi_early_fixup_table[] = {
{ CFI_MFR_AMD, 0x4801, fixup_s70gl02gs_chips },
{ },
};
static int __xipram cfi_chip_setup(struct map_info *map,
struct cfi_private *cfi)
{
int ofs_factor = cfi->interleave*cfi->device_type;
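	/* Query bytes are spaced by interleave * device width: e.g. a
	 * single x16 chip (interleave 1, device_type 2) has the 'Q' of
	 * CFI word offset 0x10 at map byte offset 0x20. */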
__u32 base = 0;
int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor);
int i;
int addr_unlock1 = 0x555, addr_unlock2 = 0x2AA;
xip_enable(base, map, cfi);
#ifdef DEBUG_CFI
printk("Number of erase regions: %d\n", num_erase_regions);
#endif
if (!num_erase_regions)
return 0;
cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
if (!cfi->cfiq)
return 0;
memset(cfi->cfiq,0,sizeof(struct cfi_ident));
cfi->cfi_mode = CFI_MODE_CFI;
cfi->sector_erase_cmd = CMD(0x30);
/* Read the CFI info structure */
xip_disable_qry(base, map, cfi);
for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor);
/* Do any necessary byteswapping */
cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID);
cfi->cfiq->P_ADR = le16_to_cpu(cfi->cfiq->P_ADR);
cfi->cfiq->A_ID = le16_to_cpu(cfi->cfiq->A_ID);
cfi->cfiq->A_ADR = le16_to_cpu(cfi->cfiq->A_ADR);
cfi->cfiq->InterfaceDesc = le16_to_cpu(cfi->cfiq->InterfaceDesc);
cfi->cfiq->MaxBufWriteSize = le16_to_cpu(cfi->cfiq->MaxBufWriteSize);
#ifdef DEBUG_CFI
/* Dump the information therein */
print_cfi_ident(cfi->cfiq);
#endif
for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
cfi->cfiq->EraseRegionInfo[i] = le32_to_cpu(cfi->cfiq->EraseRegionInfo[i]);
#ifdef DEBUG_CFI
printk(" Erase Region #%d: BlockSize 0x%4.4X bytes, %d blocks\n",
i, (cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff,
(cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1);
#endif
}
if (cfi->cfiq->P_ID == P_ID_SST_OLD) {
addr_unlock1 = 0x5555;
addr_unlock2 = 0x2AAA;
}
/*
* Note we put the device back into Read Mode BEFORE going into Auto
* Select Mode, as some devices support nesting of modes, others
* don't. This way should always work.
* On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
* so should be treated as nops or illegal (and so put the device
* back into Read Mode, which is a nop in this case).
*/
cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xaa, addr_unlock1, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, addr_unlock2, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x90, addr_unlock1, base, map, cfi, cfi->device_type, NULL);
cfi->mfr = cfi_read_query16(map, base);
cfi->id = cfi_read_query16(map, base + ofs_factor);
/* Get AMD/Spansion extended JEDEC ID */
if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e)
cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
cfi_read_query(map, base + 0xf * ofs_factor);
/* Put it back into Read Mode */
cfi_qry_mode_off(base, map, cfi);
xip_allowed(base, map);
cfi_early_fixup(cfi, cfi_early_fixup_table);
printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank. Manufacturer ID %#08x Chip ID %#08x\n",
map->name, cfi->interleave, cfi->device_type*8, base,
map->bankwidth*8, cfi->mfr, cfi->id);
return 1;
}
#ifdef DEBUG_CFI
static char *vendorname(__u16 vendor)
{
switch (vendor) {
case P_ID_NONE:
return "None";
case P_ID_INTEL_EXT:
return "Intel/Sharp Extended";
case P_ID_AMD_STD:
return "AMD/Fujitsu Standard";
case P_ID_INTEL_STD:
return "Intel/Sharp Standard";
case P_ID_AMD_EXT:
return "AMD/Fujitsu Extended";
case P_ID_WINBOND:
return "Winbond Standard";
case P_ID_ST_ADV:
return "ST Advanced";
case P_ID_MITSUBISHI_STD:
return "Mitsubishi Standard";
case P_ID_MITSUBISHI_EXT:
return "Mitsubishi Extended";
case P_ID_SST_PAGE:
return "SST Page Write";
case P_ID_SST_OLD:
return "SST 39VF160x/39VF320x";
case P_ID_INTEL_PERFORMANCE:
return "Intel Performance Code";
case P_ID_INTEL_DATA:
return "Intel Data";
case P_ID_RESERVED:
return "Not Allowed / Reserved for Future Use";
default:
return "Unknown";
}
}
static void print_cfi_ident(struct cfi_ident *cfip)
{
#if 0
if (cfip->qry[0] != 'Q' || cfip->qry[1] != 'R' || cfip->qry[2] != 'Y') {
printk("Invalid CFI ident structure.\n");
return;
}
#endif
printk("Primary Vendor Command Set: %4.4X (%s)\n", cfip->P_ID, vendorname(cfip->P_ID));
if (cfip->P_ADR)
printk("Primary Algorithm Table at %4.4X\n", cfip->P_ADR);
else
printk("No Primary Algorithm Table\n");
printk("Alternative Vendor Command Set: %4.4X (%s)\n", cfip->A_ID, vendorname(cfip->A_ID));
if (cfip->A_ADR)
printk("Alternate Algorithm Table at %4.4X\n", cfip->A_ADR);
else
printk("No Alternate Algorithm Table\n");
printk("Vcc Minimum: %2d.%d V\n", cfip->VccMin >> 4, cfip->VccMin & 0xf);
printk("Vcc Maximum: %2d.%d V\n", cfip->VccMax >> 4, cfip->VccMax & 0xf);
if (cfip->VppMin) {
printk("Vpp Minimum: %2d.%d V\n", cfip->VppMin >> 4, cfip->VppMin & 0xf);
printk("Vpp Maximum: %2d.%d V\n", cfip->VppMax >> 4, cfip->VppMax & 0xf);
}
else
printk("No Vpp line\n");
printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp);
printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp));
if (cfip->BufWriteTimeoutTyp || cfip->BufWriteTimeoutMax) {
printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp);
printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp));
}
else
printk("Full buffer write not supported\n");
printk("Typical block erase timeout: %d ms\n", 1<<cfip->BlockEraseTimeoutTyp);
printk("Maximum block erase timeout: %d ms\n", (1<<cfip->BlockEraseTimeoutMax) * (1<<cfip->BlockEraseTimeoutTyp));
if (cfip->ChipEraseTimeoutTyp || cfip->ChipEraseTimeoutMax) {
printk("Typical chip erase timeout: %d ms\n", 1<<cfip->ChipEraseTimeoutTyp);
printk("Maximum chip erase timeout: %d ms\n", (1<<cfip->ChipEraseTimeoutMax) * (1<<cfip->ChipEraseTimeoutTyp));
}
else
printk("Chip erase not supported\n");
printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20));
printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc);
switch(cfip->InterfaceDesc) {
case CFI_INTERFACE_X8_ASYNC:
printk(" - x8-only asynchronous interface\n");
break;
case CFI_INTERFACE_X16_ASYNC:
printk(" - x16-only asynchronous interface\n");
break;
case CFI_INTERFACE_X8_BY_X16_ASYNC:
printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n");
break;
case CFI_INTERFACE_X32_ASYNC:
printk(" - x32-only asynchronous interface\n");
break;
case CFI_INTERFACE_X16_BY_X32_ASYNC:
printk(" - supports x16 and x32 via Word# with asynchronous interface\n");
break;
case CFI_INTERFACE_NOT_ALLOWED:
printk(" - Not Allowed / Reserved\n");
break;
default:
printk(" - Unknown\n");
break;
}
printk("Max. bytes in buffer write: 0x%x\n", 1<< cfip->MaxBufWriteSize);
printk("Number of Erase Block Regions: %d\n", cfip->NumEraseRegions);
}
#endif /* DEBUG_CFI */
static struct chip_probe cfi_chip_probe = {
.name = "CFI",
.probe_chip = cfi_probe_chip
};
struct mtd_info *cfi_probe(struct map_info *map)
{
/*
* Just use the generic probe stuff to call our CFI-specific
* chip_probe routine in all the possible permutations, etc.
*/
return mtd_do_chip_probe(map, &cfi_chip_probe);
}
static struct mtd_chip_driver cfi_chipdrv = {
.probe = cfi_probe,
.name = "cfi_probe",
.module = THIS_MODULE
};
static int __init cfi_probe_init(void)
{
register_mtd_chip_driver(&cfi_chipdrv);
return 0;
}
static void __exit cfi_probe_exit(void)
{
unregister_mtd_chip_driver(&cfi_chipdrv);
}
module_init(cfi_probe_init);
module_exit(cfi_probe_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <[email protected]> et al.");
MODULE_DESCRIPTION("Probe code for CFI-compliant flash chips");
| linux-master | drivers/mtd/chips/cfi_probe.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Common Flash Interface support:
* ST Advanced Architecture Command Set (ID 0x0020)
*
* (C) 2000 Red Hat.
*
* 10/10/2000 Nicolas Pitre <[email protected]>
* - completely revamped method functions so they are aware and
* independent of the flash geometry (buswidth, interleave, etc.)
* - scalability vs code size is completely set at compile-time
* (see include/linux/mtd/cfi.h for selection)
* - optimized write buffer method
* 06/21/2002 Joern Engel <[email protected]> and others
* - modified Intel Command Set 0x0001 to support ST Advanced Architecture
* (command set 0x0020)
* - added a writev function
* 07/13/2005 Joern Engel <[email protected]>
* - Plugged memory leak in cfi_staa_writev().
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen);
static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_staa_sync (struct mtd_info *);
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_suspend (struct mtd_info *);
static void cfi_staa_resume (struct mtd_info *);
static void cfi_staa_destroy(struct mtd_info *);
struct mtd_info *cfi_cmdset_0020(struct map_info *, int);
static struct mtd_info *cfi_staa_setup (struct map_info *);
static struct mtd_chip_driver cfi_staa_chipdrv = {
.probe = NULL, /* Not usable directly */
.destroy = cfi_staa_destroy,
.name = "cfi_cmdset_0020",
.module = THIS_MODULE
};
/* #define DEBUG_LOCK_BITS */
//#define DEBUG_CFI_FEATURES
#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
int i;
printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
for (i=9; i<32; i++) {
if (extp->FeatureSupport & (1<<i))
printk(" - Unknown Bit %X: supported\n", i);
}
printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
for (i=1; i<8; i++) {
if (extp->SuspendCmdSupport & (1<<i))
printk(" - Unknown Bit %X: supported\n", i);
}
printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
for (i=2; i<16; i++) {
if (extp->BlkStatusRegMask & (1<<i))
printk(" - Unknown Bit %X Active: yes\n",i);
}
printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
if (extp->VppOptimal)
printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
}
#endif
/* This routine is made available to other mtd code via
* inter_module_register. It must only be accessed through
* inter_module_get which will bump the use count of this module. The
* addresses passed back in cfi are valid as long as the use count of
* this module is non-zero, i.e. between inter_module_get and
* inter_module_put. Keith Owens <[email protected]> 29 Oct 2000.
*/
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
struct cfi_private *cfi = map->fldrv_priv;
int i;
if (cfi->cfi_mode) {
/*
* It's a real CFI chip, not one for which the probe
* routine faked a CFI structure. So we read the feature
* table from it.
*/
__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
struct cfi_pri_intelext *extp;
extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
if (!extp)
return NULL;
if (extp->MajorVersion != '1' ||
(extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
printk(KERN_ERR " Unknown ST Microelectronics"
" Extended Query version %c.%c.\n",
extp->MajorVersion, extp->MinorVersion);
kfree(extp);
return NULL;
}
/* Do some byteswapping if necessary */
extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
extp->BlkStatusRegMask = cfi32_to_cpu(map,
extp->BlkStatusRegMask);
#ifdef DEBUG_CFI_FEATURES
/* Tell the user about it in lots of lovely detail */
cfi_tell_features(extp);
#endif
/* Install our own private info structure */
cfi->cmdset_priv = extp;
}
for (i=0; i< cfi->numchips; i++) {
cfi->chips[i].word_write_time = 128;
cfi->chips[i].buffer_write_time = 128;
cfi->chips[i].erase_time = 1024;
cfi->chips[i].ref_point_counter = 0;
init_waitqueue_head(&(cfi->chips[i].wq));
}
return cfi_staa_setup(map);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0020);
static struct mtd_info *cfi_staa_setup(struct map_info *map)
{
struct cfi_private *cfi = map->fldrv_priv;
struct mtd_info *mtd;
unsigned long offset = 0;
int i,j;
unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
if (!mtd) {
kfree(cfi->cmdset_priv);
return NULL;
}
mtd->priv = map;
mtd->type = MTD_NORFLASH;
mtd->size = devsize * cfi->numchips;
mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
sizeof(struct mtd_erase_region_info),
GFP_KERNEL);
if (!mtd->eraseregions) {
kfree(cfi->cmdset_priv);
kfree(mtd);
return NULL;
}
for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
unsigned long ernum, ersize;
ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
if (mtd->erasesize < ersize) {
mtd->erasesize = ersize;
}
for (j=0; j<cfi->numchips; j++) {
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
}
offset += (ersize * ernum);
}
if (offset != devsize) {
/* Argh */
printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
kfree(mtd->eraseregions);
kfree(cfi->cmdset_priv);
kfree(mtd);
return NULL;
}
for (i=0; i<mtd->numeraseregions;i++){
printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
i, (unsigned long long)mtd->eraseregions[i].offset,
mtd->eraseregions[i].erasesize,
mtd->eraseregions[i].numblocks);
}
/* Also select the correct geometry setup too */
mtd->_erase = cfi_staa_erase_varsize;
mtd->_read = cfi_staa_read;
mtd->_write = cfi_staa_write_buffers;
mtd->_writev = cfi_staa_writev;
mtd->_sync = cfi_staa_sync;
mtd->_lock = cfi_staa_lock;
mtd->_unlock = cfi_staa_unlock;
mtd->_suspend = cfi_staa_suspend;
mtd->_resume = cfi_staa_resume;
mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
map->fldrv = &cfi_staa_chipdrv;
__module_get(THIS_MODULE);
mtd->name = map->name;
return mtd;
}
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
map_word status, status_OK;
unsigned long timeo;
DECLARE_WAITQUEUE(wait, current);
int suspended = 0;
unsigned long cmd_addr;
struct cfi_private *cfi = map->fldrv_priv;
adr += chip->start;
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
	/* SR.7 (0x80) is the write state machine "ready" bit; CMD()
	   replicates it across all interleaved chips, so determine it
	   according to the interleave only once */
	status_OK = CMD(0x80);
timeo = jiffies + HZ;
retry:
mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us.
* If it's in FL_ERASING state, suspend it and make it talk now.
*/
switch (chip->state) {
case FL_ERASING:
if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
goto sleep; /* We don't support erase suspend */
map_write (map, CMD(0xb0), cmd_addr);
/* If the flash has finished erasing, then 'erase suspend'
* appears to make some (28F320) flash devices switch to
* 'read' mode. Make sure that we switch to 'read status'
* mode so we get the right data. --rmk
*/
map_write(map, CMD(0x70), cmd_addr);
chip->oldstate = FL_ERASING;
chip->state = FL_ERASE_SUSPENDING;
// printk("Erase suspending at 0x%lx\n", cmd_addr);
for (;;) {
status = map_read(map, cmd_addr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
if (time_after(jiffies, timeo)) {
/* Urgh */
map_write(map, CMD(0xd0), cmd_addr);
/* make sure we're in 'read status' mode */
map_write(map, CMD(0x70), cmd_addr);
chip->state = FL_ERASING;
wake_up(&chip->wq);
mutex_unlock(&chip->mutex);
printk(KERN_ERR "Chip not ready after erase "
"suspended: status = 0x%lx\n", status.x[0]);
return -EIO;
}
mutex_unlock(&chip->mutex);
cfi_udelay(1);
mutex_lock(&chip->mutex);
}
suspended = 1;
map_write(map, CMD(0xff), cmd_addr);
chip->state = FL_READY;
break;
#if 0
case FL_WRITING:
/* Not quite yet */
#endif
case FL_READY:
break;
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
map_write(map, CMD(0x70), cmd_addr);
chip->state = FL_STATUS;
fallthrough;
case FL_STATUS:
status = map_read(map, cmd_addr);
if (map_word_andequal(map, status, status_OK, status_OK)) {
map_write(map, CMD(0xff), cmd_addr);
chip->state = FL_READY;
break;
}
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
default:
sleep:
/* Stick ourselves on a wait queue to be woken when
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
goto retry;
}
map_copy_from(map, buf, adr, len);
if (suspended) {
chip->state = chip->oldstate;
/* What if one interleaved chip has finished and the
other hasn't? The old code would leave the finished
one in READY mode. That's bad, and caused -EROFS
errors to be returned from do_erase_oneblock because
that's the only bit it checked for at the time.
As the state machine appears to explicitly allow
sending the 0x70 (Read Status) command to an erasing
chip and expecting it to be ignored, that's what we
do. */
map_write(map, CMD(0xd0), cmd_addr);
map_write(map, CMD(0x70), cmd_addr);
}
wake_up(&chip->wq);
mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs;
int chipnum;
int ret = 0;
/* ofs: offset within the first chip that the first read should start */
chipnum = (from >> cfi->chipshift);
ofs = from - (chipnum << cfi->chipshift);
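	/* e.g. with 4 MiB chips (chipshift 22), from = 0x500000 gives
	 * chipnum 1 and ofs 0x100000 into that chip */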
while (len) {
unsigned long thislen;
if (chipnum >= cfi->numchips)
break;
if ((len + ofs -1) >> cfi->chipshift)
thislen = (1<<cfi->chipshift) - ofs;
else
thislen = len;
ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
if (ret)
break;
*retlen += thislen;
len -= thislen;
buf += thislen;
ofs = 0;
chipnum++;
}
return ret;
}
static int do_write_buffer(struct map_info *map, struct flchip *chip,
unsigned long adr, const u_char *buf, int len)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status, status_OK;
unsigned long cmd_adr, timeo;
DECLARE_WAITQUEUE(wait, current);
int wbufsize, z;
	/* M58LW064A requires bus alignment for buffer writes -- saw */
if (adr & (map_bankwidth(map)-1))
return -EINVAL;
wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
adr += chip->start;
cmd_adr = adr & ~(wbufsize-1);
/* Let's determine this according to the interleave only once */
status_OK = CMD(0x80);
timeo = jiffies + HZ;
retry:
#ifdef DEBUG_CFI_FEATURES
printk("%s: chip->state[%d]\n", __func__, chip->state);
#endif
mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us.
* Later, we can actually think about interrupting it
* if it's in FL_ERASING state.
* Not just yet, though.
*/
switch (chip->state) {
case FL_READY:
break;
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr));
#endif
fallthrough;
case FL_STATUS:
status = map_read(map, cmd_adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
status.x[0], map_read(map, cmd_adr).x[0]);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
default:
/* Stick ourselves on a wait queue to be woken when
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
goto retry;
}
ENABLE_VPP(map);
map_write(map, CMD(0xe8), cmd_adr);
chip->state = FL_WRITING_TO_BUFFER;
z = 0;
for (;;) {
status = map_read(map, cmd_adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
mutex_unlock(&chip->mutex);
cfi_udelay(1);
mutex_lock(&chip->mutex);
if (++z > 100) {
/* Argh. Not ready for write to buffer */
DISABLE_VPP(map);
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
mutex_unlock(&chip->mutex);
printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
return -EIO;
}
}
	/* Write the word count (the E8 sequence encodes it as N-1 bus words) */
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr);
/* Write data */
for (z = 0; z < len;
z += map_bankwidth(map), buf += map_bankwidth(map)) {
map_word d;
d = map_word_load(map, buf);
map_write(map, d, adr+z);
}
/* GO GO GO */
map_write(map, CMD(0xd0), cmd_adr);
chip->state = FL_WRITING;
mutex_unlock(&chip->mutex);
cfi_udelay(chip->buffer_write_time);
mutex_lock(&chip->mutex);
timeo = jiffies + (HZ/2);
z = 0;
for (;;) {
if (chip->state != FL_WRITING) {
/* Someone's suspended the write. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
mutex_lock(&chip->mutex);
continue;
}
status = map_read(map, cmd_adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
/* OK Still waiting */
if (time_after(jiffies, timeo)) {
/* clear status */
map_write(map, CMD(0x50), cmd_adr);
/* put back into read status register mode */
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
DISABLE_VPP(map);
mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
mutex_unlock(&chip->mutex);
cfi_udelay(1);
z++;
mutex_lock(&chip->mutex);
}
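	/* Adaptive programming delay: if the very first status poll
	 * succeeded (z == 0) the fixed delay was too long, so shorten it
	 * (keeping it non-zero); if we had to poll more than once,
	 * lengthen it. */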
if (!z) {
chip->buffer_write_time--;
if (!chip->buffer_write_time)
chip->buffer_write_time++;
}
if (z > 1)
chip->buffer_write_time++;
/* Done and happy. */
DISABLE_VPP(map);
chip->state = FL_STATUS;
/* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
if (map_word_bitsset(map, status, CMD(0x3a))) {
#ifdef DEBUG_CFI_FEATURES
printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
#endif
/* clear status */
map_write(map, CMD(0x50), cmd_adr);
/* put back into read status register mode */
map_write(map, CMD(0x70), adr);
wake_up(&chip->wq);
mutex_unlock(&chip->mutex);
return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
}
wake_up(&chip->wq);
mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
size_t len, size_t *retlen, const u_char *buf)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
int ret;
int chipnum;
unsigned long ofs;
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
#ifdef DEBUG_CFI_FEATURES
printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
printk("%s: ofs[%x] len[%x]\n", __func__, ofs, len);
#endif
/* Write buffer is worth it only if more than one word to write... */
while (len > 0) {
/* We must not cross write block boundaries */
int size = wbufsize - (ofs & (wbufsize-1));
if (size > len)
size = len;
ret = do_write_buffer(map, &cfi->chips[chipnum],
ofs, buf, size);
if (ret)
return ret;
ofs += size;
buf += size;
(*retlen) += size;
len -= size;
if (ofs >> cfi->chipshift) {
chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
}
return 0;
}
/*
* Writev for ECC-Flashes is a little more complicated. We need to maintain
* a small buffer for this.
 * XXX: If the buffer size is not a power of 2, this will break
*/
#define ECCBUF_SIZE (mtd->writesize)
#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
#define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
static int
cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
unsigned long i;
size_t totlen = 0, thislen;
int ret = 0;
size_t buflen = 0;
char *buffer;
if (!ECCBUF_SIZE) {
/* We should fall back to a general writev implementation.
* Until that is written, just break.
*/
return -EIO;
}
buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
for (i=0; i<count; i++) {
size_t elem_len = vecs[i].iov_len;
void *elem_base = vecs[i].iov_base;
if (!elem_len) /* FIXME: Might be unnecessary. Check that */
continue;
if (buflen) { /* cut off head */
if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
memcpy(buffer+buflen, elem_base, elem_len);
buflen += elem_len;
continue;
}
memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
buffer);
totlen += thislen;
if (ret || thislen != ECCBUF_SIZE)
goto write_error;
elem_len -= thislen-buflen;
elem_base += thislen-buflen;
to += ECCBUF_SIZE;
}
if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
&thislen, elem_base);
totlen += thislen;
if (ret || thislen != ECCBUF_DIV(elem_len))
goto write_error;
to += thislen;
}
buflen = ECCBUF_MOD(elem_len); /* cut off tail */
if (buflen) {
memset(buffer, 0xff, ECCBUF_SIZE);
			/* tail starts after the aligned chunk (0 if none was written) */
			memcpy(buffer, elem_base + ECCBUF_DIV(elem_len), buflen);
}
}
if (buflen) { /* flush last page, even if not full */
/* This is sometimes intended behaviour, really */
ret = mtd_write(mtd, to, buflen, &thislen, buffer);
totlen += thislen;
		if (ret || thislen != buflen)
goto write_error;
}
write_error:
if (retlen)
*retlen = totlen;
kfree(buffer);
return ret;
}
static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status, status_OK;
unsigned long timeo;
int retries = 3;
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
adr += chip->start;
/* Let's determine this according to the interleave only once */
status_OK = CMD(0x80);
timeo = jiffies + HZ;
retry:
mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
case FL_READY:
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
fallthrough;
case FL_STATUS:
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
default:
/* Stick ourselves on a wait queue to be woken when
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
goto retry;
}
ENABLE_VPP(map);
/* Clear the status register first */
map_write(map, CMD(0x50), adr);
/* Now erase */
map_write(map, CMD(0x20), adr);
map_write(map, CMD(0xD0), adr);
chip->state = FL_ERASING;
mutex_unlock(&chip->mutex);
msleep(1000);
mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
timeo = jiffies + (HZ*20);
for (;;) {
if (chip->state != FL_ERASING) {
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ*20); /* FIXME */
mutex_lock(&chip->mutex);
continue;
}
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
/* OK Still waiting */
if (time_after(jiffies, timeo)) {
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
mutex_unlock(&chip->mutex);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
mutex_unlock(&chip->mutex);
cfi_udelay(1);
mutex_lock(&chip->mutex);
}
DISABLE_VPP(map);
ret = 0;
/* We've broken this before. It doesn't hurt to be safe */
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
status = map_read(map, adr);
/* check for lock bit */
if (map_word_bitsset(map, status, CMD(0x3a))) {
unsigned char chipstatus = status.x[0];
if (!map_word_equal(map, status, CMD(chipstatus))) {
int i, w;
for (w=0; w<map_words(map); w++) {
for (i = 0; i<cfi_interleave(cfi); i++) {
					/* merge in each interleaved chip's status byte */
					chipstatus |= status.x[w] >> (cfi->device_type * 8 * i);
}
}
printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
status.x[0], chipstatus);
}
/* Reset the error bits */
map_write(map, CMD(0x50), adr);
map_write(map, CMD(0x70), adr);
if ((chipstatus & 0x30) == 0x30) {
printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
ret = -EIO;
} else if (chipstatus & 0x02) {
/* Protection bit set */
ret = -EROFS;
} else if (chipstatus & 0x8) {
/* Voltage */
printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
ret = -EIO;
} else if (chipstatus & 0x20) {
if (retries--) {
printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
timeo = jiffies + HZ;
chip->state = FL_STATUS;
mutex_unlock(&chip->mutex);
goto retry;
}
printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
ret = -EIO;
}
}
wake_up(&chip->wq);
mutex_unlock(&chip->mutex);
return ret;
}
static int cfi_staa_erase_varsize(struct mtd_info *mtd,
struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long adr, len;
int chipnum, ret;
int i, first;
struct mtd_erase_region_info *regions = mtd->eraseregions;
/* Check that both start and end of the requested erase are
* aligned with the erasesize at the appropriate addresses.
*/
i = 0;
/* Skip all erase regions which are ended before the start of
the requested erase. Actually, to save on the calculations,
we skip to the first erase region which starts after the
start of the requested erase, and then go back one.
*/
while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
i++;
i--;
/* OK, now i is pointing at the erase region in which this
erase request starts. Check the start of the requested
erase range is aligned with the erase size which is in
effect here.
*/
if (instr->addr & (regions[i].erasesize-1))
return -EINVAL;
/* Remember the erase region we start on */
first = i;
/* Next, check that the end of the requested erase is aligned
* with the erase region at that address.
*/
while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
i++;
/* As before, drop back one to point at the region in which
the address actually falls
*/
i--;
if ((instr->addr + instr->len) & (regions[i].erasesize-1))
return -EINVAL;
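	/*
	 * Worked example (hypothetical layout): with region 0 = 8 x 8 KiB
	 * at offset 0 and region 1 = 64 KiB blocks from offset 0x10000,
	 * an erase of addr 0x10000, len 0x20000 starts in region 1, and
	 * both 0x10000 and 0x30000 are 64 KiB aligned, so both checks
	 * above pass.
	 */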
chipnum = instr->addr >> cfi->chipshift;
adr = instr->addr - (chipnum << cfi->chipshift);
len = instr->len;
i=first;
while(len) {
ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
if (ret)
return ret;
adr += regions[i].erasesize;
len -= regions[i].erasesize;
		/* Crossed into the next erase region on this chip? */
		if (adr % (1 << cfi->chipshift) ==
		    (((unsigned long)regions[i].offset +
		      (regions[i].erasesize * regions[i].numblocks)) %
		     (1 << cfi->chipshift)))
			i++;
if (adr >> cfi->chipshift) {
adr = 0;
chipnum++;
if (chipnum >= cfi->numchips)
break;
}
}
return 0;
}
static void cfi_staa_sync (struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int i;
struct flchip *chip;
int ret = 0;
DECLARE_WAITQUEUE(wait, current);
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
retry:
mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
case FL_STATUS:
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
chip->oldstate = chip->state;
chip->state = FL_SYNCING;
/* No need to wake_up() on this state change -
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
fallthrough;
case FL_SYNCING:
mutex_unlock(&chip->mutex);
break;
default:
/* Not an idle state */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
goto retry;
}
}
/* Unlock the chips again */
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
mutex_unlock(&chip->mutex);
}
}
static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status, status_OK;
unsigned long timeo = jiffies + HZ;
DECLARE_WAITQUEUE(wait, current);
adr += chip->start;
/* Let's determine this according to the interleave only once */
status_OK = CMD(0x80);
timeo = jiffies + HZ;
retry:
mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
case FL_READY:
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
fallthrough;
case FL_STATUS:
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
default:
/* Stick ourselves on a wait queue to be woken when
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
goto retry;
}
ENABLE_VPP(map);
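	/* Block lock sequence: 0x60 (config setup) then 0x01 (set lock bit) */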
map_write(map, CMD(0x60), adr);
map_write(map, CMD(0x01), adr);
chip->state = FL_LOCKING;
mutex_unlock(&chip->mutex);
msleep(1000);
mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
timeo = jiffies + (HZ*2);
for (;;) {
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
/* OK Still waiting */
if (time_after(jiffies, timeo)) {
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
mutex_unlock(&chip->mutex);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
mutex_unlock(&chip->mutex);
cfi_udelay(1);
mutex_lock(&chip->mutex);
}
/* Done and happy. */
chip->state = FL_STATUS;
DISABLE_VPP(map);
wake_up(&chip->wq);
mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long adr;
int chipnum, ret;
#ifdef DEBUG_LOCK_BITS
int ofs_factor = cfi->interleave * cfi->device_type;
#endif
if (ofs & (mtd->erasesize - 1))
return -EINVAL;
if (len & (mtd->erasesize -1))
return -EINVAL;
chipnum = ofs >> cfi->chipshift;
adr = ofs - (chipnum << cfi->chipshift);
while(len) {
#ifdef DEBUG_LOCK_BITS
cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif
ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
#ifdef DEBUG_LOCK_BITS
cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif
if (ret)
return ret;
adr += mtd->erasesize;
len -= mtd->erasesize;
if (adr >> cfi->chipshift) {
adr = 0;
chipnum++;
if (chipnum >= cfi->numchips)
break;
}
}
return 0;
}
static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status, status_OK;
unsigned long timeo = jiffies + HZ;
DECLARE_WAITQUEUE(wait, current);
adr += chip->start;
/* Let's determine this according to the interleave only once */
status_OK = CMD(0x80);
timeo = jiffies + HZ;
retry:
mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
case FL_READY:
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
fallthrough;
case FL_STATUS:
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
default:
/* Stick ourselves on a wait queue to be woken when
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
goto retry;
}
ENABLE_VPP(map);
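	/* Block unlock sequence: 0x60 (config setup) then 0xD0 (clear lock bit) */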
map_write(map, CMD(0x60), adr);
map_write(map, CMD(0xD0), adr);
chip->state = FL_UNLOCKING;
mutex_unlock(&chip->mutex);
msleep(1000);
mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
timeo = jiffies + (HZ*2);
for (;;) {
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
/* OK Still waiting */
if (time_after(jiffies, timeo)) {
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
mutex_unlock(&chip->mutex);
return -EIO;
}
		/* Latency issues. Drop the lock, wait a while and retry */
mutex_unlock(&chip->mutex);
cfi_udelay(1);
mutex_lock(&chip->mutex);
}
/* Done and happy. */
chip->state = FL_STATUS;
DISABLE_VPP(map);
wake_up(&chip->wq);
mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long adr;
int chipnum, ret;
#ifdef DEBUG_LOCK_BITS
int ofs_factor = cfi->interleave * cfi->device_type;
#endif
chipnum = ofs >> cfi->chipshift;
adr = ofs - (chipnum << cfi->chipshift);
#ifdef DEBUG_LOCK_BITS
{
unsigned long temp_adr = adr;
unsigned long temp_len = len;
cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
while (temp_len) {
printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
temp_adr += mtd->erasesize;
temp_len -= mtd->erasesize;
}
cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
}
#endif
ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);
#ifdef DEBUG_LOCK_BITS
cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif
return ret;
}
static int cfi_staa_suspend(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int i;
struct flchip *chip;
int ret = 0;
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
case FL_STATUS:
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
chip->oldstate = chip->state;
chip->state = FL_PM_SUSPENDED;
/* No need to wake_up() on this state change -
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
break;
case FL_PM_SUSPENDED:
break;
default:
ret = -EAGAIN;
break;
}
mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
if (ret) {
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
/* No need to force it into a known state here,
because we're returning failure, and it didn't
get power cycled */
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
mutex_unlock(&chip->mutex);
}
}
return ret;
}
static void cfi_staa_resume(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int i;
struct flchip *chip;
for (i=0; i<cfi->numchips; i++) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
/* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
map_write(map, CMD(0xFF), 0);
chip->state = FL_READY;
wake_up(&chip->wq);
}
mutex_unlock(&chip->mutex);
}
}
static void cfi_staa_destroy(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
kfree(cfi->cmdset_priv);
kfree(cfi);
}
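
/*
 * Illustrative sketch, compiled out: how a caller might reach the
 * writev path above through the generic MTD API. The function and
 * its parameters are invented for the example.
 */
#if 0
static int example_gather_write(struct mtd_info *mtd, loff_t to,
				void *hdr, size_t hdr_len,
				void *payload, size_t payload_len)
{
	struct kvec vecs[2] = {
		{ .iov_base = hdr,	.iov_len = hdr_len },
		{ .iov_base = payload,	.iov_len = payload_len },
	};
	size_t retlen;

	/* cfi_staa_writev() coalesces the pieces into writesize-sized
	 * chunks, padding the final partial chunk with 0xff. */
	return mtd_writev(mtd, vecs, 2, to, &retlen);
}
#endif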
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/chips/cfi_cmdset_0020.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Common Flash Interface support:
* Intel Extended Vendor Command Set (ID 0x0001)
*
* (C) 2000 Red Hat.
*
*
* 10/10/2000 Nicolas Pitre <[email protected]>
* - completely revamped method functions so they are aware and
* independent of the flash geometry (buswidth, interleave, etc.)
* - scalability vs code size is completely set at compile-time
* (see include/linux/mtd/cfi.h for selection)
* - optimized write buffer method
* 02/05/2002 Christopher Hoover <[email protected]>/<[email protected]>
* - reworked lock/unlock/erase support for var size flash
* 21/03/2007 Rodolfo Giometti <[email protected]>
* - auto unlock sectors on resume for auto locking flash on power up
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0
/* Intel chips */
#define I82802AB 0x00ad
#define I82802AC 0x00ac
#define PF38F4476 0x881c
#define M28F00AP30 0x8963
/* STMicroelectronics chips */
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
#define M50FLW080B 0x0081
/* Atmel chips */
#define AT49BV640D 0x02de
#define AT49BV640DT 0x02db
/* Sharp chips */
#define LH28F640BFHE_PTTL90 0x00b0
#define LH28F640BFHE_PBTL90 0x00b1
#define LH28F640BFHE_PTTL70A 0x00b2
#define LH28F640BFHE_PBTL70A 0x00b3
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
size_t *, const u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
size_t *, struct otp_info *);
static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
static void cfi_intelext_destroy(struct mtd_info *);
struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"
/*
* *********** SETUP AND PROBE BITS ***********
*/
static struct mtd_chip_driver cfi_intelext_chipdrv = {
.probe = NULL, /* Not usable directly */
.destroy = cfi_intelext_destroy,
.name = "cfi_cmdset_0001",
.module = THIS_MODULE
};
/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */
#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
int i;
printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
for (i=11; i<32; i++) {
if (extp->FeatureSupport & (1<<i))
printk(" - Unknown Bit %X: supported\n", i);
}
printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
for (i=1; i<8; i++) {
if (extp->SuspendCmdSupport & (1<<i))
printk(" - Unknown Bit %X: supported\n", i);
}
printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
for (i=2; i<3; i++) {
if (extp->BlkStatusRegMask & (1<<i))
printk(" - Unknown Bit %X Active: yes\n",i);
}
printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
for (i=6; i<16; i++) {
if (extp->BlkStatusRegMask & (1<<i))
printk(" - Unknown Bit %X Active: yes\n",i);
}
printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
if (extp->VppOptimal)
printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
struct cfi_pri_atmel atmel_pri;
uint32_t features = 0;
/* Reverse the byteswapping done by read_pri_intelext() so we get
 * back the raw little-endian bytes before reinterpreting them with
 * the Atmel PRI layout. */
extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
memcpy(&atmel_pri, extp, sizeof(atmel_pri));
memset((char *)extp + 5, 0, sizeof(*extp) - 5);
printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
if (atmel_pri.Features & 0x01) /* chip erase supported */
features |= (1<<0);
if (atmel_pri.Features & 0x02) /* erase suspend supported */
features |= (1<<1);
if (atmel_pri.Features & 0x04) /* program suspend supported */
features |= (1<<2);
if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
features |= (1<<9);
if (atmel_pri.Features & 0x20) /* page mode read supported */
features |= (1<<7);
if (atmel_pri.Features & 0x40) /* queued erase supported */
features |= (1<<4);
if (atmel_pri.Features & 0x80) /* Protection bits supported */
features |= (1<<6);
extp->FeatureSupport = features;
/* burst write mode not supported */
cfi->cfiq->BufWriteTimeoutTyp = 0;
cfi->cfiq->BufWriteTimeoutMax = 0;
}
static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
cfip->FeatureSupport |= (1 << 5);
mtd->flags |= MTD_POWERUP_LOCK;
}
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
"erase on write disabled.\n");
extp->SuspendCmdSupport &= ~1;
}
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
if (cfip && (cfip->FeatureSupport&4)) {
cfip->FeatureSupport &= ~4;
printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
}
}
#endif
static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
}
static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
/* Note this is done after the region info is endian swapped */
cfi->cfiq->EraseRegionInfo[1] =
(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
static int is_LH28F640BF(struct cfi_private *cfi)
{
/* Sharp LH28F640BF Family */
if (cfi->mfr == CFI_MFR_SHARP && (
cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
return 1;
return 0;
}
static void fixup_LH28F640BF(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
/* Reset the Partition Configuration Register on LH28F640BF
* to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
if (is_LH28F640BF(cfi)) {
printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
map_write(map, CMD(0x60), 0);
map_write(map, CMD(0x04), 0);
/* We have set one single partition thus
* Simultaneous Operations are not allowed */
printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
extp->FeatureSupport &= ~512;
}
}
static void fixup_use_point(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
if (!mtd->_point && map_is_linear(map)) {
mtd->_point = cfi_intelext_point;
mtd->_unpoint = cfi_intelext_unpoint;
}
}
static void fixup_use_write_buffers(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
if (cfi->cfiq->BufWriteTimeoutTyp) {
printk(KERN_INFO "Using buffer write method\n" );
mtd->_write = cfi_intelext_write_buffers;
mtd->_writev = cfi_intelext_writev;
}
}
/*
* Some chips power-up with all sectors locked by default.
*/
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
if (cfip->FeatureSupport&32) {
printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
mtd->flags |= MTD_POWERUP_LOCK;
}
}
static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
{ 0, 0, NULL }
};
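/*
 * cfi_fixup() walks such a table and invokes every entry whose
 * manufacturer and device IDs match the probed chip; CFI_MFR_ANY and
 * CFI_ID_ANY act as wildcards, and matching entries are applied in
 * table order.
 */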
static struct cfi_fixup jedec_fixup_table[] = {
{ CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock },
{ CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock },
{ CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock },
{ CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock },
{ CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock },
{ 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common. It is likely the device IDs are as
	 * well. This table picks up all cases where we know
	 * that is the case.
	 */
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
{ 0, 0, NULL }
};
static void cfi_fixup_major_minor(struct cfi_private *cfi,
struct cfi_pri_intelext *extp)
{
if (cfi->mfr == CFI_MFR_INTEL &&
cfi->id == PF38F4476 && extp->MinorVersion == '3')
extp->MinorVersion = '1';
}
static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
{
	/*
	 * Micron (formerly Numonyx) 1 Gbit bottom-boot parts are buggy
	 * w.r.t. erase suspend on their small (0x8000-byte) erase blocks.
	 */
if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
return 1;
return 0;
}
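/*
 * Read the Intel/Sharp extended query table. Its tail is variable
 * length (OTP descriptors, burst read info and partition region info,
 * depending on the minor version), so we read the fixed header first,
 * work out how much extra data this version implies, and if the buffer
 * turns out to be too small we free it and re-read at the larger size.
 */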
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp;
unsigned int extra_size = 0;
unsigned int extp_size = sizeof(*extp);
again:
extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
if (!extp)
return NULL;
cfi_fixup_major_minor(cfi, extp);
if (extp->MajorVersion != '1' ||
(extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
"version %c.%c.\n", extp->MajorVersion,
extp->MinorVersion);
kfree(extp);
return NULL;
}
/* Do some byteswapping if necessary */
extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
if (extp->MinorVersion >= '0') {
extra_size = 0;
/* Protection Register info */
if (extp->NumProtectionFields)
extra_size += (extp->NumProtectionFields - 1) *
sizeof(struct cfi_intelext_otpinfo);
}
if (extp->MinorVersion >= '1') {
/* Burst Read info */
extra_size += 2;
if (extp_size < sizeof(*extp) + extra_size)
goto need_more;
extra_size += extp->extra[extra_size - 1];
}
if (extp->MinorVersion >= '3') {
int nb_parts, i;
/* Number of hardware-partitions */
extra_size += 1;
if (extp_size < sizeof(*extp) + extra_size)
goto need_more;
nb_parts = extp->extra[extra_size - 1];
/* skip the sizeof(partregion) field in CFI 1.4 */
if (extp->MinorVersion >= '4')
extra_size += 2;
for (i = 0; i < nb_parts; i++) {
struct cfi_intelext_regioninfo *rinfo;
rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
extra_size += sizeof(*rinfo);
if (extp_size < sizeof(*extp) + extra_size)
goto need_more;
rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
extra_size += (rinfo->NumBlockTypes - 1)
* sizeof(struct cfi_intelext_blockinfo);
}
if (extp->MinorVersion >= '4')
extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
if (extp_size < sizeof(*extp) + extra_size) {
need_more:
extp_size = sizeof(*extp) + extra_size;
kfree(extp);
if (extp_size > 4096) {
printk(KERN_ERR
"%s: cfi_pri_intelext is too fat\n",
__func__);
return NULL;
}
goto again;
}
}
return extp;
}
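/*
 * Main entry point for the Intel/Sharp (cmdset 0001) chip driver:
 * fill in the default mtd operations, read and fix up the extended
 * query table when the chip was probed via real CFI, and derive the
 * per-chip program/erase timings from the CFI timeout fields.
 */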
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
struct cfi_private *cfi = map->fldrv_priv;
struct mtd_info *mtd;
int i;
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
if (!mtd)
return NULL;
mtd->priv = map;
mtd->type = MTD_NORFLASH;
/* Fill in the default mtd operations */
mtd->_erase = cfi_intelext_erase_varsize;
mtd->_read = cfi_intelext_read;
mtd->_write = cfi_intelext_write_words;
mtd->_sync = cfi_intelext_sync;
mtd->_lock = cfi_intelext_lock;
mtd->_unlock = cfi_intelext_unlock;
mtd->_is_locked = cfi_intelext_is_locked;
mtd->_suspend = cfi_intelext_suspend;
mtd->_resume = cfi_intelext_resume;
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
mtd->writesize = 1;
mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
if (cfi->cfi_mode == CFI_MODE_CFI) {
/*
* It's a real CFI chip, not one for which the probe
* routine faked a CFI structure. So we read the feature
* table from it.
*/
__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
struct cfi_pri_intelext *extp;
extp = read_pri_intelext(map, adr);
if (!extp) {
kfree(mtd);
return NULL;
}
/* Install our own private info structure */
cfi->cmdset_priv = extp;
cfi_fixup(mtd, cfi_fixup_table);
#ifdef DEBUG_CFI_FEATURES
/* Tell the user about it in lots of lovely detail */
cfi_tell_features(extp);
#endif
if(extp->SuspendCmdSupport & 1) {
printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
}
}
else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
/* Apply jedec specific fixups */
cfi_fixup(mtd, jedec_fixup_table);
}
/* Apply generic fixups */
cfi_fixup(mtd, fixup_table);
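	/*
	 * CFI timeout fields are log2-encoded: WordWriteTimeoutTyp and
	 * BufWriteTimeoutTyp give the typical time as 2^n microseconds,
	 * BlockEraseTimeoutTyp as 2^n milliseconds, and the *Max fields
	 * give the worst case as a 2^n multiple of the typical value -
	 * hence the shifts by the sum of both exponents below.
	 */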
for (i=0; i< cfi->numchips; i++) {
if (cfi->cfiq->WordWriteTimeoutTyp)
cfi->chips[i].word_write_time =
1<<cfi->cfiq->WordWriteTimeoutTyp;
else
cfi->chips[i].word_write_time = 50000;
if (cfi->cfiq->BufWriteTimeoutTyp)
cfi->chips[i].buffer_write_time =
1<<cfi->cfiq->BufWriteTimeoutTyp;
/* No default; if it isn't specified, we won't use it */
if (cfi->cfiq->BlockEraseTimeoutTyp)
cfi->chips[i].erase_time =
1000<<cfi->cfiq->BlockEraseTimeoutTyp;
else
cfi->chips[i].erase_time = 2000000;
if (cfi->cfiq->WordWriteTimeoutTyp &&
cfi->cfiq->WordWriteTimeoutMax)
cfi->chips[i].word_write_time_max =
1<<(cfi->cfiq->WordWriteTimeoutTyp +
cfi->cfiq->WordWriteTimeoutMax);
else
cfi->chips[i].word_write_time_max = 50000 * 8;
if (cfi->cfiq->BufWriteTimeoutTyp &&
cfi->cfiq->BufWriteTimeoutMax)
cfi->chips[i].buffer_write_time_max =
1<<(cfi->cfiq->BufWriteTimeoutTyp +
cfi->cfiq->BufWriteTimeoutMax);
if (cfi->cfiq->BlockEraseTimeoutTyp &&
cfi->cfiq->BlockEraseTimeoutMax)
cfi->chips[i].erase_time_max =
1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
cfi->cfiq->BlockEraseTimeoutMax);
else
cfi->chips[i].erase_time_max = 2000000 * 8;
cfi->chips[i].ref_point_counter = 0;
init_waitqueue_head(&(cfi->chips[i].wq));
}
map->fldrv = &cfi_intelext_chipdrv;
return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long offset = 0;
int i,j;
unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
mtd->size = devsize * cfi->numchips;
mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
mtd->eraseregions = kcalloc(mtd->numeraseregions,
sizeof(struct mtd_erase_region_info),
GFP_KERNEL);
if (!mtd->eraseregions)
goto setup_err;
for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
unsigned long ernum, ersize;
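		/*
		 * CFI erase region info encoding: bits 15:0 hold the
		 * number of blocks minus one, bits 31:16 the block size
		 * in units of 256 bytes; (info >> 8) & ~0xff therefore
		 * yields the block size in bytes.
		 */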
ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
if (mtd->erasesize < ersize) {
mtd->erasesize = ersize;
}
for (j=0; j<cfi->numchips; j++) {
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
goto setup_err;
}
offset += (ersize * ernum);
}
if (offset != devsize) {
/* Argh */
printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
goto setup_err;
}
for (i=0; i<mtd->numeraseregions;i++){
printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
i,(unsigned long long)mtd->eraseregions[i].offset,
mtd->eraseregions[i].erasesize,
mtd->eraseregions[i].numblocks);
}
#ifdef CONFIG_MTD_OTP
mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif
/* This function has the potential to distort the reality
a bit and therefore should be called last. */
if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
goto setup_err;
__module_get(THIS_MODULE);
register_reboot_notifier(&mtd->reboot_notifier);
return mtd;
setup_err:
if (mtd->eraseregions)
for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
for (j=0; j<cfi->numchips; j++)
kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
kfree(mtd->eraseregions);
kfree(mtd);
kfree(cfi->cmdset_priv);
return NULL;
}
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
struct cfi_private **pcfi)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = *pcfi;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
/*
* Probing of multi-partition flash chips.
*
* To support multiple partitions when available, we simply arrange
* for each of them to have their own flchip structure even if they
* are on the same physical chip. This means completely recreating
 * a new cfi_private structure right here which is a blatant code
* layering violation, but this is still the least intrusive
* arrangement at this point. This can be rearranged in the future
* if someone feels motivated enough. --nico
*/
if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
&& extp->FeatureSupport & (1 << 9)) {
int offs = 0;
struct cfi_private *newcfi;
struct flchip *chip;
struct flchip_shared *shared;
int numregions, numparts, partshift, numvirtchips, i, j;
/* Protection Register info */
if (extp->NumProtectionFields)
offs = (extp->NumProtectionFields - 1) *
sizeof(struct cfi_intelext_otpinfo);
/* Burst Read info */
offs += extp->extra[offs+1]+2;
/* Number of partition regions */
numregions = extp->extra[offs];
offs += 1;
/* skip the sizeof(partregion) field in CFI 1.4 */
if (extp->MinorVersion >= '4')
offs += 2;
/* Number of hardware partitions */
numparts = 0;
for (i = 0; i < numregions; i++) {
struct cfi_intelext_regioninfo *rinfo;
rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
numparts += rinfo->NumIdentPartitions;
offs += sizeof(*rinfo)
+ (rinfo->NumBlockTypes - 1) *
sizeof(struct cfi_intelext_blockinfo);
}
if (!numparts)
numparts = 1;
/* Programming Region info */
if (extp->MinorVersion >= '4') {
struct cfi_intelext_programming_regioninfo *prinfo;
prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
mtd->flags &= ~MTD_BIT_WRITEABLE;
printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
map->name, mtd->writesize,
cfi->interleave * prinfo->ControlValid,
cfi->interleave * prinfo->ControlInvalid);
}
/*
* All functions below currently rely on all chips having
* the same geometry so we'll just assume that all hardware
* partitions are of the same size too.
*/
partshift = cfi->chipshift - __ffs(numparts);
if ((1 << partshift) < mtd->erasesize) {
printk( KERN_ERR
"%s: bad number of hw partitions (%d)\n",
__func__, numparts);
return -EINVAL;
}
numvirtchips = cfi->numchips * numparts;
newcfi = kmalloc(struct_size(newcfi, chips, numvirtchips),
GFP_KERNEL);
if (!newcfi)
return -ENOMEM;
shared = kmalloc_array(cfi->numchips,
sizeof(struct flchip_shared),
GFP_KERNEL);
if (!shared) {
kfree(newcfi);
return -ENOMEM;
}
memcpy(newcfi, cfi, sizeof(struct cfi_private));
newcfi->numchips = numvirtchips;
newcfi->chipshift = partshift;
chip = &newcfi->chips[0];
for (i = 0; i < cfi->numchips; i++) {
shared[i].writing = shared[i].erasing = NULL;
mutex_init(&shared[i].lock);
for (j = 0; j < numparts; j++) {
*chip = cfi->chips[i];
chip->start += j << partshift;
chip->priv = &shared[i];
/* those should be reset too since
they create memory references. */
init_waitqueue_head(&chip->wq);
mutex_init(&chip->mutex);
chip++;
}
}
printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
"--> %d partitions of %d KiB\n",
map->name, cfi->numchips, cfi->interleave,
newcfi->numchips, 1<<(newcfi->chipshift-10));
map->fldrv_priv = newcfi;
*pcfi = newcfi;
kfree(cfi);
}
return 0;
}
/*
* *********** CHIP ACCESS FUNCTIONS ***********
*/
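/*
 * chip_ready() is called with chip->mutex held. It decides whether the
 * chip can accept an operation of type 'mode' right now: returning 0
 * means go ahead (possibly after suspending an in-progress erase),
 * while -EAGAIN means the mutex was dropped and re-taken, so the
 * caller must re-evaluate the chip state and retry.
 */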
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
DECLARE_WAITQUEUE(wait, current);
struct cfi_private *cfi = map->fldrv_priv;
map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
unsigned long timeo = jiffies + HZ;
/* Prevent setting state FL_SYNCING for chip in suspended state. */
if (mode == FL_SYNCING && chip->oldstate != FL_READY)
goto sleep;
switch (chip->state) {
case FL_STATUS:
for (;;) {
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
/* At this point we're fine with write operations
in other partitions as they don't conflict. */
if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
break;
mutex_unlock(&chip->mutex);
cfi_udelay(1);
mutex_lock(&chip->mutex);
/* Someone else might have been playing with it. */
return -EAGAIN;
}
fallthrough;
case FL_READY:
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
return 0;
case FL_ERASING:
if (!cfip ||
!(cfip->FeatureSupport & 2) ||
!(mode == FL_READY || mode == FL_POINT ||
(mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
goto sleep;
		/* Do not allow suspend if the read/write targets the erasing block */
if ((adr & chip->in_progress_block_mask) ==
chip->in_progress_block_addr)
goto sleep;
		/* do not suspend small erase blocks: buggy Micron chips */
if (cfi_is_micron_28F00AP30(cfi, chip) &&
(chip->in_progress_block_mask == ~(0x8000-1)))
goto sleep;
/* Erase suspend */
map_write(map, CMD(0xB0), chip->in_progress_block_addr);
/* If the flash has finished erasing, then 'erase suspend'
* appears to make some (28F320) flash devices switch to
* 'read' mode. Make sure that we switch to 'read status'
* mode so we get the right data. --rmk
*/
map_write(map, CMD(0x70), chip->in_progress_block_addr);
chip->oldstate = FL_ERASING;
chip->state = FL_ERASE_SUSPENDING;
chip->erase_suspended = 1;
for (;;) {
status = map_read(map, chip->in_progress_block_addr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
if (time_after(jiffies, timeo)) {
/* Urgh. Resume and pretend we weren't here.
* Make sure we're in 'read status' mode if it had finished */
put_chip(map, chip, adr);
printk(KERN_ERR "%s: Chip not ready after erase "
"suspended: status = 0x%lx\n", map->name, status.x[0]);
return -EIO;
}
mutex_unlock(&chip->mutex);
cfi_udelay(1);
mutex_lock(&chip->mutex);
/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
So we can just loop here. */
}
chip->state = FL_STATUS;
return 0;
case FL_XIP_WHILE_ERASING:
if (mode != FL_READY && mode != FL_POINT &&
(mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
goto sleep;
chip->oldstate = chip->state;
chip->state = FL_READY;
return 0;
case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can get the chip anymore */
return -EIO;
case FL_POINT:
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
return 0;
fallthrough;
default:
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
return -EAGAIN;
}
}
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
int ret;
DECLARE_WAITQUEUE(wait, current);
retry:
if (chip->priv &&
(mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
|| mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
/*
* OK. We have possibility for contention on the write/erase
* operations which are global to the real chip and not per
* partition. So let's fight it over in the partition which
* currently has authority on the operation.
*
* The rules are as follows:
*
* - any write operation must own shared->writing.
*
* - any erase operation must own _both_ shared->writing and
* shared->erasing.
*
* - contention arbitration is handled in the owner's context.
*
* The 'shared' struct can be read and/or written only when
* its lock is taken.
*/
struct flchip_shared *shared = chip->priv;
struct flchip *contender;
mutex_lock(&shared->lock);
contender = shared->writing;
if (contender && contender != chip) {
/*
* The engine to perform desired operation on this
* partition is already in use by someone else.
* Let's fight over it in the context of the chip
* currently using it. If it is possible to suspend,
* that other partition will do just that, otherwise
* it'll happily send us to sleep. In any case, when
* get_chip returns success we're clear to go ahead.
*/
ret = mutex_trylock(&contender->mutex);
mutex_unlock(&shared->lock);
if (!ret)
goto retry;
mutex_unlock(&chip->mutex);
ret = chip_ready(map, contender, contender->start, mode);
mutex_lock(&chip->mutex);
if (ret == -EAGAIN) {
mutex_unlock(&contender->mutex);
goto retry;
}
if (ret) {
mutex_unlock(&contender->mutex);
return ret;
}
mutex_lock(&shared->lock);
/* We should not own chip if it is already
* in FL_SYNCING state. Put contender and retry. */
if (chip->state == FL_SYNCING) {
put_chip(map, contender, contender->start);
mutex_unlock(&contender->mutex);
goto retry;
}
mutex_unlock(&contender->mutex);
}
/* Check if we already have suspended erase
* on this chip. Sleep. */
if (mode == FL_ERASING && shared->erasing
&& shared->erasing->oldstate == FL_ERASING) {
mutex_unlock(&shared->lock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
goto retry;
}
/* We now own it */
shared->writing = chip;
if (mode == FL_ERASING)
shared->erasing = chip;
mutex_unlock(&shared->lock);
}
ret = chip_ready(map, chip, adr, mode);
if (ret == -EAGAIN)
goto retry;
return ret;
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
if (chip->priv) {
struct flchip_shared *shared = chip->priv;
mutex_lock(&shared->lock);
if (shared->writing == chip && chip->oldstate == FL_READY) {
/* We own the ability to write, but we're done */
shared->writing = shared->erasing;
if (shared->writing && shared->writing != chip) {
/* give back ownership to who we loaned it from */
struct flchip *loaner = shared->writing;
mutex_lock(&loaner->mutex);
mutex_unlock(&shared->lock);
mutex_unlock(&chip->mutex);
put_chip(map, loaner, loaner->start);
mutex_lock(&chip->mutex);
mutex_unlock(&loaner->mutex);
wake_up(&chip->wq);
return;
}
shared->erasing = NULL;
shared->writing = NULL;
} else if (shared->erasing == chip && shared->writing != chip) {
/*
* We own the ability to erase without the ability
* to write, which means the erase was suspended
* and some other partition is currently writing.
* Don't let the switch below mess things up since
* we don't have ownership to resume anything.
*/
mutex_unlock(&shared->lock);
wake_up(&chip->wq);
return;
}
mutex_unlock(&shared->lock);
}
switch(chip->oldstate) {
case FL_ERASING:
/* What if one interleaved chip has finished and the
other hasn't? The old code would leave the finished
one in READY mode. That's bad, and caused -EROFS
errors to be returned from do_erase_oneblock because
that's the only bit it checked for at the time.
As the state machine appears to explicitly allow
sending the 0x70 (Read Status) command to an erasing
chip and expecting it to be ignored, that's what we
do. */
map_write(map, CMD(0xd0), chip->in_progress_block_addr);
map_write(map, CMD(0x70), chip->in_progress_block_addr);
chip->oldstate = FL_READY;
chip->state = FL_ERASING;
break;
case FL_XIP_WHILE_ERASING:
chip->state = chip->oldstate;
chip->oldstate = FL_READY;
break;
case FL_READY:
case FL_STATUS:
case FL_JEDEC_QUERY:
break;
default:
printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
}
wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP
/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */
static void xip_disable(struct map_info *map, struct flchip *chip,
unsigned long adr)
{
/* TODO: chips with no XIP use should ignore and return */
(void) map_read(map, adr); /* ensure mmu mapping is up to date */
local_irq_disable();
}
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
if (chip->state != FL_POINT && chip->state != FL_READY) {
map_write(map, CMD(0xff), adr);
chip->state = FL_READY;
}
(void) map_read(map, adr);
xip_iprefetch();
local_irq_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts. Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked. Task scheduling might also
 * happen at that point. The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */
static int __xipram xip_wait_for_operation(
struct map_info *map, struct flchip *chip,
unsigned long adr, unsigned int chip_op_time_max)
{
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
map_word status, OK = CMD(0x80);
unsigned long usec, suspended, start, done;
flstate_t oldstate, newstate;
start = xip_currtime();
usec = chip_op_time_max;
if (usec == 0)
usec = 500000;
done = 0;
do {
cpu_relax();
if (xip_irqpending() && cfip &&
((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
(chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
(cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
/*
* Let's suspend the erase or write operation when
* supported. Note that we currently don't try to
* suspend interleaved chips if there is already
* another operation suspended (imagine what happens
* when one chip was already done with the current
* operation while another chip suspended it, then
* we resume the whole thing at once). Yes, it
* can happen!
*/
usec -= done;
map_write(map, CMD(0xb0), adr);
map_write(map, CMD(0x70), adr);
suspended = xip_currtime();
do {
if (xip_elapsed_since(suspended) > 100000) {
/*
* The chip doesn't want to suspend
* after waiting for 100 msecs.
* This is a critical error but there
* is not much we can do here.
*/
return -EIO;
}
status = map_read(map, adr);
} while (!map_word_andequal(map, status, OK, OK));
/* Suspend succeeded */
oldstate = chip->state;
if (oldstate == FL_ERASING) {
if (!map_word_bitsset(map, status, CMD(0x40)))
break;
newstate = FL_XIP_WHILE_ERASING;
chip->erase_suspended = 1;
} else {
if (!map_word_bitsset(map, status, CMD(0x04)))
break;
newstate = FL_XIP_WHILE_WRITING;
chip->write_suspended = 1;
}
chip->state = newstate;
map_write(map, CMD(0xff), adr);
(void) map_read(map, adr);
xip_iprefetch();
local_irq_enable();
mutex_unlock(&chip->mutex);
xip_iprefetch();
cond_resched();
/*
* We're back. However someone else might have
* decided to go write to the chip if we are in
* a suspended erase state. If so let's wait
* until it's done.
*/
mutex_lock(&chip->mutex);
while (chip->state != newstate) {
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
}
/* Disallow XIP again */
local_irq_disable();
/* Resume the write or erase operation */
map_write(map, CMD(0xd0), adr);
map_write(map, CMD(0x70), adr);
chip->state = oldstate;
start = xip_currtime();
} else if (usec >= 1000000/HZ) {
/*
* Try to save on CPU power when waiting delay
* is at least a system timer tick period.
* No need to be extremely accurate here.
*/
xip_cpu_idle();
}
status = map_read(map, adr);
done = xip_elapsed_since(start);
} while (!map_word_andequal(map, status, OK, OK)
&& done < usec);
return (done >= usec) ? -ETIME : 0;
}
/*
* The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
* the flash is actively programming or erasing since we have to poll for
* the operation to complete anyway. We can't do that in a generic way with
* a XIP setup so do it before the actual flash operation in this case
* and stub it out from INVAL_CACHE_AND_WAIT.
*/
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
INVALIDATE_CACHED_RANGE(map, from, size)
#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
xip_wait_for_operation(map, chip, cmd_adr, usec_max)
#else
#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
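/*
 * Non-XIP wait path: invalidate the cache over the range being
 * modified, then poll the status register for SR.7 (WSM ready).
 * While the typical operation time is long enough we sleep for half
 * of it instead of busy-waiting, and the timeout is rearmed whenever
 * the operation was suspended and resumed underneath us.
 */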
static int inval_cache_and_wait_for_operation(
struct map_info *map, struct flchip *chip,
unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
unsigned int chip_op_time, unsigned int chip_op_time_max)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status, status_OK = CMD(0x80);
int chip_state = chip->state;
unsigned int timeo, sleep_time, reset_timeo;
mutex_unlock(&chip->mutex);
if (inval_len)
INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
mutex_lock(&chip->mutex);
timeo = chip_op_time_max;
if (!timeo)
timeo = 500000;
reset_timeo = timeo;
sleep_time = chip_op_time / 2;
for (;;) {
if (chip->state != chip_state) {
/* Someone's suspended the operation: sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
continue;
}
status = map_read(map, cmd_adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
if (chip->erase_suspended && chip_state == FL_ERASING) {
			/* Erase suspend occurred while sleeping: reset timeout */
timeo = reset_timeo;
chip->erase_suspended = 0;
}
if (chip->write_suspended && chip_state == FL_WRITING) {
			/* Write suspend occurred while sleeping: reset timeout */
timeo = reset_timeo;
chip->write_suspended = 0;
}
if (!timeo) {
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
return -ETIME;
}
		/* OK. Still waiting. Drop the lock, wait a while and retry. */
mutex_unlock(&chip->mutex);
if (sleep_time >= 1000000/HZ) {
/*
* Half of the normal delay still remaining
* can be performed with a sleeping delay instead
* of busy waiting.
*/
msleep(sleep_time/1000);
timeo -= sleep_time;
sleep_time = 1000000/HZ;
} else {
udelay(1);
cond_resched();
timeo--;
}
mutex_lock(&chip->mutex);
}
/* Done and happy. */
chip->state = FL_STATUS;
return 0;
}
#endif
#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
unsigned long cmd_addr;
struct cfi_private *cfi = map->fldrv_priv;
int ret;
adr += chip->start;
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_POINT);
if (!ret) {
if (chip->state != FL_POINT && chip->state != FL_READY)
map_write(map, CMD(0xff), cmd_addr);
chip->state = FL_POINT;
chip->ref_point_counter++;
}
mutex_unlock(&chip->mutex);
return ret;
}
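/*
 * mtd "point" support: hand the caller a direct kernel mapping of the
 * flash array (only possible on linear maps), keeping the chips in
 * FL_POINT with a reference count until the matching unpoint.
 */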
static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs, last_end = 0;
int chipnum;
int ret;
if (!map->virt)
return -EINVAL;
/* Now lock the chip(s) to POINT state */
	/* ofs: offset within the first chip at which the first read should start */
chipnum = (from >> cfi->chipshift);
ofs = from - (chipnum << cfi->chipshift);
*virt = map->virt + cfi->chips[chipnum].start + ofs;
if (phys)
*phys = map->phys + cfi->chips[chipnum].start + ofs;
while (len) {
unsigned long thislen;
if (chipnum >= cfi->numchips)
break;
/* We cannot point across chips that are virtually disjoint */
if (!last_end)
last_end = cfi->chips[chipnum].start;
else if (cfi->chips[chipnum].start != last_end)
break;
if ((len + ofs -1) >> cfi->chipshift)
thislen = (1<<cfi->chipshift) - ofs;
else
thislen = len;
ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
if (ret)
break;
*retlen += thislen;
len -= thislen;
ofs = 0;
last_end += 1 << cfi->chipshift;
chipnum++;
}
return 0;
}
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs;
int chipnum, err = 0;
/* Now unlock the chip(s) POINT state */
	/* ofs: offset within the first chip at which the first read should start */
chipnum = (from >> cfi->chipshift);
ofs = from - (chipnum << cfi->chipshift);
while (len && !err) {
unsigned long thislen;
		struct flchip *chip;
		if (chipnum >= cfi->numchips)
			break;
		chip = &cfi->chips[chipnum];
if ((len + ofs -1) >> cfi->chipshift)
thislen = (1<<cfi->chipshift) - ofs;
else
thislen = len;
mutex_lock(&chip->mutex);
if (chip->state == FL_POINT) {
chip->ref_point_counter--;
if(chip->ref_point_counter == 0)
chip->state = FL_READY;
} else {
printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
err = -EINVAL;
}
put_chip(map, chip, chip->start);
mutex_unlock(&chip->mutex);
len -= thislen;
ofs = 0;
chipnum++;
}
return err;
}
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
unsigned long cmd_addr;
struct cfi_private *cfi = map->fldrv_priv;
int ret;
adr += chip->start;
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_READY);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
if (chip->state != FL_POINT && chip->state != FL_READY) {
map_write(map, CMD(0xff), cmd_addr);
chip->state = FL_READY;
}
map_copy_from(map, buf, adr, len);
put_chip(map, chip, cmd_addr);
mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs;
int chipnum;
int ret = 0;
	/* ofs: offset within the first chip at which the first read should start */
chipnum = (from >> cfi->chipshift);
ofs = from - (chipnum << cfi->chipshift);
while (len) {
unsigned long thislen;
if (chipnum >= cfi->numchips)
break;
if ((len + ofs -1) >> cfi->chipshift)
thislen = (1<<cfi->chipshift) - ofs;
else
thislen = len;
ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
if (ret)
break;
*retlen += thislen;
len -= thislen;
buf += thislen;
ofs = 0;
chipnum++;
}
return ret;
}
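/*
 * Program one bus word. The command is 0x40 (0x41 on Performance
 * parts) for a normal write and 0xc0 for an OTP register write,
 * followed by the data itself. The 0x1a error mask below checks
 * SR.1 (block locked), SR.3 (VPP low) and SR.4 (program failure).
 */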
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
unsigned long adr, map_word datum, int mode)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status, write_cmd;
int ret;
adr += chip->start;
switch (mode) {
case FL_WRITING:
write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
break;
case FL_OTP_WRITE:
write_cmd = CMD(0xc0);
break;
default:
return -EINVAL;
}
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, mode);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
ENABLE_VPP(map);
xip_disable(map, chip, adr);
map_write(map, write_cmd, adr);
map_write(map, datum, adr);
chip->state = mode;
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, map_bankwidth(map),
chip->word_write_time,
chip->word_write_time_max);
if (ret) {
xip_enable(map, chip, adr);
printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
goto out;
}
/* check for errors */
status = map_read(map, adr);
if (map_word_bitsset(map, status, CMD(0x1a))) {
unsigned long chipstatus = MERGESTATUS(status);
/* reset status */
map_write(map, CMD(0x50), adr);
map_write(map, CMD(0x70), adr);
xip_enable(map, chip, adr);
if (chipstatus & 0x02) {
ret = -EROFS;
} else if (chipstatus & 0x08) {
printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
ret = -EIO;
} else {
printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
ret = -EINVAL;
}
goto out;
}
xip_enable(map, chip, adr);
out: DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int ret;
int chipnum;
unsigned long ofs;
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
/* If it's not bus-aligned, do the first byte write */
if (ofs & (map_bankwidth(map)-1)) {
unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
int gap = ofs - bus_ofs;
int n;
map_word datum;
n = min_t(int, len, map_bankwidth(map)-gap);
datum = map_word_ff(map);
datum = map_word_load_partial(map, datum, buf, gap, n);
ret = do_write_oneword(map, &cfi->chips[chipnum],
bus_ofs, datum, FL_WRITING);
if (ret)
return ret;
len -= n;
ofs += n;
buf += n;
(*retlen) += n;
if (ofs >> cfi->chipshift) {
chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
}
while(len >= map_bankwidth(map)) {
map_word datum = map_word_load(map, buf);
ret = do_write_oneword(map, &cfi->chips[chipnum],
ofs, datum, FL_WRITING);
if (ret)
return ret;
ofs += map_bankwidth(map);
buf += map_bankwidth(map);
(*retlen) += map_bankwidth(map);
len -= map_bankwidth(map);
if (ofs >> cfi->chipshift) {
chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
}
if (len & (map_bankwidth(map)-1)) {
map_word datum;
datum = map_word_ff(map);
datum = map_word_load_partial(map, datum, buf, 0, len);
ret = do_write_oneword(map, &cfi->chips[chipnum],
ofs, datum, FL_WRITING);
if (ret)
return ret;
(*retlen) += len;
}
return 0;
}
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
unsigned long adr, const struct kvec **pvec,
unsigned long *pvec_seek, int len)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status, write_cmd, datum;
unsigned long cmd_adr;
int ret, wbufsize, word_gap, words;
const struct kvec *vec;
unsigned long vec_seek;
unsigned long initial_adr;
int initial_len = len;
wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
adr += chip->start;
initial_adr = adr;
cmd_adr = adr & ~(wbufsize-1);
/* Sharp LH28F640BF chips need the first address for the
* Page Buffer Program command. See Table 5 of
* LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
if (is_LH28F640BF(cfi))
cmd_adr = adr;
/* Let's determine this according to the interleave only once */
write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_adr, FL_WRITING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
ENABLE_VPP(map);
xip_disable(map, chip, cmd_adr);
/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
[...], the device will not accept any more Write to Buffer commands".
So we must check here and reset those bits if they're set. Otherwise
we're just pissing in the wind */
if (chip->state != FL_STATUS) {
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
}
status = map_read(map, cmd_adr);
if (map_word_bitsset(map, status, CMD(0x30))) {
xip_enable(map, chip, cmd_adr);
printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
xip_disable(map, chip, cmd_adr);
map_write(map, CMD(0x50), cmd_adr);
map_write(map, CMD(0x70), cmd_adr);
}
chip->state = FL_WRITING_TO_BUFFER;
map_write(map, write_cmd, cmd_adr);
ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
if (ret) {
/* Argh. Not ready for write to buffer */
map_word Xstatus = map_read(map, cmd_adr);
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
status = map_read(map, cmd_adr);
map_write(map, CMD(0x50), cmd_adr);
map_write(map, CMD(0x70), cmd_adr);
xip_enable(map, chip, cmd_adr);
printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
map->name, Xstatus.x[0], status.x[0]);
goto out;
}
/* Figure out the number of words to write */
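	/*
	 * The device expects the count as (number of words - 1). If adr is
	 * not bus-aligned, back it up to the previous word boundary and pad
	 * the leading gap with 0xff: programming can only clear bits, so
	 * the padded bytes are left untouched.
	 */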
word_gap = (-adr & (map_bankwidth(map)-1));
words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
if (!word_gap) {
words--;
} else {
word_gap = map_bankwidth(map) - word_gap;
adr -= word_gap;
datum = map_word_ff(map);
}
/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr);
/* Write data */
vec = *pvec;
vec_seek = *pvec_seek;
do {
int n = map_bankwidth(map) - word_gap;
if (n > vec->iov_len - vec_seek)
n = vec->iov_len - vec_seek;
if (n > len)
n = len;
if (!word_gap && len < map_bankwidth(map))
datum = map_word_ff(map);
datum = map_word_load_partial(map, datum,
vec->iov_base + vec_seek,
word_gap, n);
len -= n;
word_gap += n;
if (!len || word_gap == map_bankwidth(map)) {
map_write(map, datum, adr);
adr += map_bankwidth(map);
word_gap = 0;
}
vec_seek += n;
if (vec_seek == vec->iov_len) {
vec++;
vec_seek = 0;
}
} while (len);
*pvec = vec;
*pvec_seek = vec_seek;
/* GO GO GO */
map_write(map, CMD(0xd0), cmd_adr);
chip->state = FL_WRITING;
ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
initial_adr, initial_len,
chip->buffer_write_time,
chip->buffer_write_time_max);
if (ret) {
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
xip_enable(map, chip, cmd_adr);
printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
goto out;
}
/* check for errors */
status = map_read(map, cmd_adr);
if (map_word_bitsset(map, status, CMD(0x1a))) {
unsigned long chipstatus = MERGESTATUS(status);
/* reset status */
map_write(map, CMD(0x50), cmd_adr);
map_write(map, CMD(0x70), cmd_adr);
xip_enable(map, chip, cmd_adr);
if (chipstatus & 0x02) {
ret = -EROFS;
} else if (chipstatus & 0x08) {
printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
ret = -EIO;
} else {
printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
ret = -EINVAL;
}
goto out;
}
xip_enable(map, chip, cmd_adr);
out: DISABLE_VPP(map);
put_chip(map, chip, cmd_adr);
mutex_unlock(&chip->mutex);
return ret;
}
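/*
 * Gather-write entry point: split the iovecs into chunks that never
 * cross a write-buffer boundary and feed each chunk to
 * do_write_buffer().
 */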
static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
int ret;
int chipnum;
unsigned long ofs, vec_seek, i;
size_t len = 0;
for (i = 0; i < count; i++)
len += vecs[i].iov_len;
if (!len)
return 0;
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
vec_seek = 0;
do {
/* We must not cross write block boundaries */
int size = wbufsize - (ofs & (wbufsize-1));
if (size > len)
size = len;
ret = do_write_buffer(map, &cfi->chips[chipnum],
ofs, &vecs, &vec_seek, size);
if (ret)
return ret;
ofs += size;
(*retlen) += size;
len -= size;
if (ofs >> cfi->chipshift) {
chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
/* Be nice and reschedule with the chip in a usable state for other
processes. */
cond_resched();
} while (len);
return 0;
}
static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
size_t len, size_t *retlen, const u_char *buf)
{
struct kvec vec;
vec.iov_base = (void *) buf;
vec.iov_len = len;
return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}
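/*
 * Erase one block: 0x20/0xd0 command sequence at the block address,
 * then a (long) status poll. The 0x3a error mask adds SR.5 (erase
 * failure) to the usual lock/VPP/program bits, and a plain SR.5
 * failure is retried up to three times before giving up.
 */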
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
unsigned long adr, int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status;
int retries = 3;
int ret;
adr += chip->start;
retry:
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_ERASING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
XIP_INVAL_CACHED_RANGE(map, adr, len);
ENABLE_VPP(map);
xip_disable(map, chip, adr);
/* Clear the status register first */
map_write(map, CMD(0x50), adr);
/* Now erase */
map_write(map, CMD(0x20), adr);
map_write(map, CMD(0xD0), adr);
chip->state = FL_ERASING;
chip->erase_suspended = 0;
chip->in_progress_block_addr = adr;
chip->in_progress_block_mask = ~(len - 1);
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, len,
chip->erase_time,
chip->erase_time_max);
if (ret) {
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
xip_enable(map, chip, adr);
printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
goto out;
}
/* We've broken this before. It doesn't hurt to be safe */
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
status = map_read(map, adr);
/* check for errors */
if (map_word_bitsset(map, status, CMD(0x3a))) {
unsigned long chipstatus = MERGESTATUS(status);
/* Reset the error bits */
map_write(map, CMD(0x50), adr);
map_write(map, CMD(0x70), adr);
xip_enable(map, chip, adr);
if ((chipstatus & 0x30) == 0x30) {
printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
ret = -EINVAL;
} else if (chipstatus & 0x02) {
/* Protection bit set */
ret = -EROFS;
} else if (chipstatus & 0x8) {
/* Voltage */
printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
ret = -EIO;
} else if (chipstatus & 0x20 && retries--) {
printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
goto retry;
} else {
printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
ret = -EIO;
}
goto out;
}
xip_enable(map, chip, adr);
out: DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
instr->len, NULL);
}
static void cfi_intelext_sync (struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int i;
struct flchip *chip;
int ret = 0;
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SYNCING);
if (!ret) {
chip->oldstate = chip->state;
chip->state = FL_SYNCING;
/* No need to wake_up() on this state change -
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
}
mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
mutex_unlock(&chip->mutex);
}
}
static int __xipram do_getlockstatus_oneblock(struct map_info *map,
struct flchip *chip,
unsigned long adr,
int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
int status, ofs_factor = cfi->interleave * cfi->device_type;
adr += chip->start;
xip_disable(map, chip, adr+(2*ofs_factor));
map_write(map, CMD(0x90), adr+(2*ofs_factor));
chip->state = FL_JEDEC_QUERY;
status = cfi_read_query(map, adr+(2*ofs_factor));
xip_enable(map, chip, 0);
return status;
}
#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
struct flchip *chip,
unsigned long adr,
int len, void *thunk)
{
printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
return 0;
}
#endif
#define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
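/*
 * Lock or unlock one block: 0x60 (set lock bit setup) followed by
 * 0x01 to set or 0xd0 to clear the block's lock bit; the thunk
 * argument selects which of the two we are doing.
 */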
static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
unsigned long adr, int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
int mdelay;
int ret;
adr += chip->start;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
ENABLE_VPP(map);
xip_disable(map, chip, adr);
map_write(map, CMD(0x60), adr);
if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
map_write(map, CMD(0x01), adr);
chip->state = FL_LOCKING;
} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
map_write(map, CMD(0xD0), adr);
chip->state = FL_UNLOCKING;
} else
BUG();
	/*
	 * If Instant Individual Block Locking is supported there is no
	 * need to delay.
	 *
	 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
	 * let's use a max of 1.5 seconds (1500ms) as timeout.
	 *
	 * See "Clear Block Lock-Bits Time" on page 40 in
	 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
	 * from February 2003
	 */
mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
if (ret) {
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
xip_enable(map, chip, adr);
printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
goto out;
}
xip_enable(map, chip, adr);
out: DISABLE_VPP(map);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
int ret;
#ifdef DEBUG_LOCK_BITS
printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
__func__, ofs, len);
cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
ofs, len, NULL);
#endif
ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
#ifdef DEBUG_LOCK_BITS
printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
__func__, ret);
cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
ofs, len, NULL);
#endif
return ret;
}
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
int ret;
#ifdef DEBUG_LOCK_BITS
printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
__func__, ofs, len);
cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
ofs, len, NULL);
#endif
ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
#ifdef DEBUG_LOCK_BITS
printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
__func__, ret);
cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
ofs, len, NULL);
#endif
return ret;
}
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
uint64_t len)
{
return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
ofs, len, NULL) ? 1 : 0;
}
#ifdef CONFIG_MTD_OTP
typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
u_long data_offset, u_char *buf, u_int size,
u_long prot_offset, u_int groupno, u_int groupsize);
static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
struct cfi_private *cfi = map->fldrv_priv;
int ret;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
/* let's ensure we're not reading back cached data from array mode */
INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
xip_disable(map, chip, chip->start);
if (chip->state != FL_JEDEC_QUERY) {
map_write(map, CMD(0x90), chip->start);
chip->state = FL_JEDEC_QUERY;
}
map_copy_from(map, buf, chip->start + offset, size);
xip_enable(map, chip, chip->start);
/* then ensure we don't keep OTP data in the cache */
INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
put_chip(map, chip, chip->start);
mutex_unlock(&chip->mutex);
return 0;
}
static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
int ret;
while (size) {
unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
int gap = offset - bus_ofs;
int n = min_t(int, size, map_bankwidth(map)-gap);
map_word datum = map_word_ff(map);
datum = map_word_load_partial(map, datum, buf, gap, n);
ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
if (ret)
return ret;
offset += n;
buf += n;
size -= n;
}
return 0;
}
static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word datum;
/* make sure area matches group boundaries */
if (size != grpsz)
return -EXDEV;
datum = map_word_ff(map);
datum = map_word_clr(map, datum, CMD(1 << grpno));
return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}
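/*
 * Walk the chip's protection (OTP) register regions, applying
 * 'action' to each group that intersects the requested range. A NULL
 * action means "enumerate": the buffer is filled with otp_info
 * records instead. Region geometry beyond the first comes from the
 * cfi_intelext_otpinfo entries in the extended query tail.
 */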
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf,
otp_op_t action, int user_regs)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
struct flchip *chip;
struct cfi_intelext_otpinfo *otp;
u_long devsize, reg_prot_offset, data_offset;
u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
int ret;
*retlen = 0;
/* Check that we actually have some OTP registers */
if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
return -ENODATA;
/* we need real chips here not virtual ones */
devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
chip_step = devsize >> cfi->chipshift;
chip_num = 0;
/* Some chips have OTP located in the _top_ partition only.
For example: Intel 28F256L18T (T means top-parameter device) */
if (cfi->mfr == CFI_MFR_INTEL) {
switch (cfi->id) {
case 0x880b:
case 0x880c:
case 0x880d:
chip_num = chip_step - 1;
}
}
for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
chip = &cfi->chips[chip_num];
otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
/* first OTP region */
field = 0;
reg_prot_offset = extp->ProtRegAddr;
reg_fact_groups = 1;
reg_fact_size = 1 << extp->FactProtRegSize;
reg_user_groups = 1;
reg_user_size = 1 << extp->UserProtRegSize;
while (len > 0) {
/* flash geometry fixup */
data_offset = reg_prot_offset + 1;
data_offset *= cfi->interleave * cfi->device_type;
reg_prot_offset *= cfi->interleave * cfi->device_type;
reg_fact_size *= cfi->interleave;
reg_user_size *= cfi->interleave;
if (user_regs) {
groups = reg_user_groups;
groupsize = reg_user_size;
/* skip over factory reg area */
groupno = reg_fact_groups;
data_offset += reg_fact_groups * reg_fact_size;
} else {
groups = reg_fact_groups;
groupsize = reg_fact_size;
groupno = 0;
}
while (len > 0 && groups > 0) {
if (!action) {
/*
* Special case: if action is NULL
* we fill buf with otp_info records.
*/
struct otp_info *otpinfo;
map_word lockword;
len -= sizeof(struct otp_info);
if (len <= 0)
return -ENOSPC;
ret = do_otp_read(map, chip,
reg_prot_offset,
(u_char *)&lockword,
map_bankwidth(map),
0, 0, 0);
if (ret)
return ret;
otpinfo = (struct otp_info *)buf;
otpinfo->start = from;
otpinfo->length = groupsize;
otpinfo->locked =
!map_word_bitsset(map, lockword,
CMD(1 << groupno));
from += groupsize;
buf += sizeof(*otpinfo);
*retlen += sizeof(*otpinfo);
} else if (from >= groupsize) {
from -= groupsize;
data_offset += groupsize;
} else {
int size = groupsize;
data_offset += from;
size -= from;
from = 0;
if (size > len)
size = len;
ret = action(map, chip, data_offset,
buf, size, reg_prot_offset,
groupno, groupsize);
if (ret < 0)
return ret;
buf += size;
len -= size;
*retlen += size;
data_offset += size;
}
groupno++;
groups--;
}
/* next OTP region */
if (++field == extp->NumProtectionFields)
break;
reg_prot_offset = otp->ProtRegAddr;
reg_fact_groups = otp->FactGroups;
reg_fact_size = 1 << otp->FactProtRegSize;
reg_user_groups = otp->UserGroups;
reg_user_size = 1 << otp->UserProtRegSize;
otp++;
}
}
return 0;
}
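/*
* The walk above serves two purposes. With a NULL action it emits one
* struct otp_info per protection group; with do_otp_read/write/lock it
* maps the linear (from, len) byte range onto the matching groups. As
* a hypothetical example, reading 8 user-OTP bytes starting at offset 4
* on a device with 8-byte groups invokes the action twice: for bytes
* 4-7 of group 0 and for bytes 0-3 of group 1.
*/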
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen,
u_char *buf)
{
return cfi_intelext_otp_walk(mtd, from, len, retlen,
buf, do_otp_read, 0);
}
static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen,
u_char *buf)
{
return cfi_intelext_otp_walk(mtd, from, len, retlen,
buf, do_otp_read, 1);
}
static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen,
const u_char *buf)
{
return cfi_intelext_otp_walk(mtd, from, len, retlen,
(u_char *)buf, do_otp_write, 1);
}
static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
loff_t from, size_t len)
{
size_t retlen;
return cfi_intelext_otp_walk(mtd, from, len, &retlen,
NULL, do_otp_lock, 1);
}
static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
size_t *retlen, struct otp_info *buf)
{
return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
NULL, 0);
}
static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
size_t *retlen, struct otp_info *buf)
{
return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
NULL, 1);
}
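/*
* These handlers back the MTD OTP ioctls. A minimal user-space sketch
* (fd is a hypothetical descriptor on the mtd char device):
*
* int mode = MTD_OTP_USER;
* ioctl(fd, OTPSELECT, &mode);    - route subsequent reads to user OTP
* pread(fd, buf, sizeof(buf), 0); - handled by ..._read_user_prot_reg()
*
* OTPGETREGIONINFO reports the otp_info records produced by the
* NULL-action walk above.
*/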
#endif
static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
struct mtd_erase_region_info *region;
int block, status, i;
unsigned long adr;
size_t len;
for (i = 0; i < mtd->numeraseregions; i++) {
region = &mtd->eraseregions[i];
if (!region->lockmap)
continue;
for (block = 0; block < region->numblocks; block++){
len = region->erasesize;
adr = region->offset + block * len;
status = cfi_varsize_frob(mtd,
do_getlockstatus_oneblock, adr, len, NULL);
if (status)
set_bit(block, region->lockmap);
else
clear_bit(block, region->lockmap);
}
}
}
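/*
* On parts that power up with all blocks locked (MTD_POWERUP_LOCK),
* the lockmap snapshot taken above lets cfi_intelext_restore_locks()
* re-unlock, after resume, exactly those blocks that were unlocked
* before suspend.
*/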
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
int i;
struct flchip *chip;
int ret = 0;
if ((mtd->flags & MTD_POWERUP_LOCK)
&& extp && (extp->FeatureSupport & (1 << 5)))
cfi_intelext_save_locks(mtd);
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
switch (chip->state) {
case FL_READY:
case FL_STATUS:
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
if (chip->oldstate == FL_READY) {
/* place the chip in a known state before suspend */
map_write(map, CMD(0xFF), cfi->chips[i].start);
chip->oldstate = chip->state;
chip->state = FL_PM_SUSPENDED;
/* No need to wake_up() on this state change -
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
} else {
/* There seems to be an operation pending. We must wait for it. */
printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
ret = -EAGAIN;
}
break;
default:
/* Should we actually wait? Once upon a time these routines weren't
allowed to. Or should we return -EAGAIN, because the upper layers
ought to have already shut down anything which was using the device
anyway? The latter for now. */
printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
ret = -EAGAIN;
break;
case FL_PM_SUSPENDED:
break;
}
mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
if (ret) {
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
/* No need to force it into a known state here,
because we're returning failure, and it didn't
get power cycled */
chip->state = chip->oldstate;
chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
mutex_unlock(&chip->mutex);
}
}
return ret;
}
static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
struct mtd_erase_region_info *region;
int block, i;
unsigned long adr;
size_t len;
for (i = 0; i < mtd->numeraseregions; i++) {
region = &mtd->eraseregions[i];
if (!region->lockmap)
continue;
for_each_clear_bit(block, region->lockmap, region->numblocks) {
len = region->erasesize;
adr = region->offset + block * len;
cfi_intelext_unlock(mtd, adr, len);
}
}
}
static void cfi_intelext_resume(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
int i;
struct flchip *chip;
for (i=0; i<cfi->numchips; i++) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
/* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
/* Refresh LH28F640BF Partition Config. Register */
fixup_LH28F640BF(mtd);
map_write(map, CMD(0xFF), cfi->chips[i].start);
chip->oldstate = chip->state = FL_READY;
wake_up(&chip->wq);
}
mutex_unlock(&chip->mutex);
}
if ((mtd->flags & MTD_POWERUP_LOCK)
&& extp && (extp->FeatureSupport & (1 << 5)))
cfi_intelext_restore_locks(mtd);
}
static int cfi_intelext_reset(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int i, ret;
for (i=0; i < cfi->numchips; i++) {
struct flchip *chip = &cfi->chips[i];
/* force the completion of any ongoing operation
and switch to array mode so any bootloader in
flash is accessible for soft reboot. */
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
if (!ret) {
map_write(map, CMD(0xff), chip->start);
chip->state = FL_SHUTDOWN;
put_chip(map, chip, chip->start);
}
mutex_unlock(&chip->mutex);
}
return 0;
}
static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
void *v)
{
struct mtd_info *mtd;
mtd = container_of(nb, struct mtd_info, reboot_notifier);
cfi_intelext_reset(mtd);
return NOTIFY_DONE;
}
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct mtd_erase_region_info *region;
int i;
cfi_intelext_reset(mtd);
unregister_reboot_notifier(&mtd->reboot_notifier);
kfree(cfi->cmdset_priv);
kfree(cfi->cfiq);
kfree(cfi->chips[0].priv);
kfree(cfi);
for (i = 0; i < mtd->numeraseregions; i++) {
region = &mtd->eraseregions[i];
kfree(region->lockmap);
}
kfree(mtd->eraseregions);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <[email protected]> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");
| linux-master | drivers/mtd/chips/cfi_cmdset_0001.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Common code to handle absent "placeholder" devices
* Copyright 2001 Resilience Corporation <[email protected]>
*
* This map driver is used to allocate "placeholder" MTD
* devices on systems that have socketed/removable media.
* Use of this driver as a fallback preserves the expected
* registration of MTD device nodes regardless of probe outcome.
* A usage example is as follows:
*
* my_dev[i] = do_map_probe("cfi", &my_map[i]);
* if(NULL == my_dev[i]) {
* my_dev[i] = do_map_probe("map_absent", &my_map[i]);
* }
*
* Any device 'probed' with this driver will return -ENODEV
* upon open.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
static int map_absent_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int map_absent_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int map_absent_erase (struct mtd_info *, struct erase_info *);
static void map_absent_sync (struct mtd_info *);
static struct mtd_info *map_absent_probe(struct map_info *map);
static void map_absent_destroy (struct mtd_info *);
static struct mtd_chip_driver map_absent_chipdrv = {
.probe = map_absent_probe,
.destroy = map_absent_destroy,
.name = "map_absent",
.module = THIS_MODULE
};
static struct mtd_info *map_absent_probe(struct map_info *map)
{
struct mtd_info *mtd;
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
if (!mtd) {
return NULL;
}
map->fldrv = &map_absent_chipdrv;
mtd->priv = map;
mtd->name = map->name;
mtd->type = MTD_ABSENT;
mtd->size = map->size;
mtd->_erase = map_absent_erase;
mtd->_read = map_absent_read;
mtd->_write = map_absent_write;
mtd->_sync = map_absent_sync;
mtd->flags = 0;
mtd->erasesize = PAGE_SIZE;
mtd->writesize = 1;
__module_get(THIS_MODULE);
return mtd;
}
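/*
* The geometry above is a placeholder: erasesize presumably only needs
* to be nonzero so that callers doing size/erasesize arithmetic don't
* divide by zero; no real erase can happen since map_absent_erase()
* always fails with -ENODEV.
*/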
static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
return -ENODEV;
}
static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
return -ENODEV;
}
static int map_absent_erase(struct mtd_info *mtd, struct erase_info *instr)
{
return -ENODEV;
}
static void map_absent_sync(struct mtd_info *mtd)
{
/* nop */
}
static void map_absent_destroy(struct mtd_info *mtd)
{
/* nop */
}
static int __init map_absent_init(void)
{
register_mtd_chip_driver(&map_absent_chipdrv);
return 0;
}
static void __exit map_absent_exit(void)
{
unregister_mtd_chip_driver(&map_absent_chipdrv);
}
module_init(map_absent_init);
module_exit(map_absent_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Resilience Corporation - Eric Brower <[email protected]>");
MODULE_DESCRIPTION("Placeholder MTD chip driver for 'absent' chips");
| linux-master | drivers/mtd/chips/map_absent.c |
// SPDX-License-Identifier: GPL-2.0
/*
Common Flash Interface probe code.
(C) 2000 Red Hat.
See JEDEC (http://www.jedec.org/) standard JESD21C (section 3.5)
for the standard this probe goes back to.
Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/gen_probe.h>
/* AMD */
#define AM29DL800BB 0x22CB
#define AM29DL800BT 0x224A
#define AM29F800BB 0x2258
#define AM29F800BT 0x22D6
#define AM29LV400BB 0x22BA
#define AM29LV400BT 0x22B9
#define AM29LV800BB 0x225B
#define AM29LV800BT 0x22DA
#define AM29LV160DT 0x22C4
#define AM29LV160DB 0x2249
#define AM29F017D 0x003D
#define AM29F016D 0x00AD
#define AM29F080 0x00D5
#define AM29F040 0x00A4
#define AM29LV040B 0x004F
#define AM29F032B 0x0041
#define AM29F002T 0x00B0
#define AM29SL800DB 0x226B
#define AM29SL800DT 0x22EA
/* Atmel */
#define AT49BV512 0x0003
#define AT29LV512 0x003d
#define AT49BV16X 0x00C0
#define AT49BV16XT 0x00C2
#define AT49BV32X 0x00C8
#define AT49BV32XT 0x00C9
/* Eon */
#define EN29LV400AT 0x22B9
#define EN29LV400AB 0x22BA
#define EN29SL800BB 0x226B
#define EN29SL800BT 0x22EA
/* Fujitsu */
#define MBM29F040C 0x00A4
#define MBM29F800BA 0x2258
#define MBM29LV650UE 0x22D7
#define MBM29LV320TE 0x22F6
#define MBM29LV320BE 0x22F9
#define MBM29LV160TE 0x22C4
#define MBM29LV160BE 0x2249
#define MBM29LV800BA 0x225B
#define MBM29LV800TA 0x22DA
#define MBM29LV400TC 0x22B9
#define MBM29LV400BC 0x22BA
/* Hyundai */
#define HY29F002T 0x00B0
/* Intel */
#define I28F004B3T 0x00d4
#define I28F004B3B 0x00d5
#define I28F400B3T 0x8894
#define I28F400B3B 0x8895
#define I28F008S5 0x00a6
#define I28F016S5 0x00a0
#define I28F008SA 0x00a2
#define I28F008B3T 0x00d2
#define I28F008B3B 0x00d3
#define I28F800B3T 0x8892
#define I28F800B3B 0x8893
#define I28F016S3 0x00aa
#define I28F016B3T 0x00d0
#define I28F016B3B 0x00d1
#define I28F160B3T 0x8890
#define I28F160B3B 0x8891
#define I28F320B3T 0x8896
#define I28F320B3B 0x8897
#define I28F640B3T 0x8898
#define I28F640B3B 0x8899
#define I28F640C3B 0x88CD
#define I28F160F3T 0x88F3
#define I28F160F3B 0x88F4
#define I28F160C3T 0x88C2
#define I28F160C3B 0x88C3
#define I82802AB 0x00ad
#define I82802AC 0x00ac
/* Macronix */
#define MX29LV040C 0x004F
#define MX29LV160T 0x22C4
#define MX29LV160B 0x2249
#define MX29F040 0x00A4
#define MX29F016 0x00AD
#define MX29F002T 0x00B0
#define MX29F004T 0x0045
#define MX29F004B 0x0046
/* NEC */
#define UPD29F064115 0x221C
/* PMC */
#define PM49FL002 0x006D
#define PM49FL004 0x006E
#define PM49FL008 0x006A
/* Sharp */
#define LH28F640BF 0x00B0
/* ST - www.st.com */
#define M29F800AB 0x0058
#define M29W800DT 0x22D7
#define M29W800DB 0x225B
#define M29W400DT 0x00EE
#define M29W400DB 0x00EF
#define M29W160DT 0x22C4
#define M29W160DB 0x2249
#define M29W040B 0x00E3
#define M50FW040 0x002C
#define M50FW080 0x002D
#define M50FW016 0x002E
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
#define M50FLW080B 0x0081
#define PSD4256G6V 0x00e9
/* SST */
#define SST29EE020 0x0010
#define SST29LE020 0x0012
#define SST29EE512 0x005d
#define SST29LE512 0x003d
#define SST39LF800 0x2781
#define SST39LF160 0x2782
#define SST39VF1601 0x234b
#define SST39VF3201 0x235b
#define SST39WF1601 0x274b
#define SST39WF1602 0x274a
#define SST39LF512 0x00D4
#define SST39LF010 0x00D5
#define SST39LF020 0x00D6
#define SST39LF040 0x00D7
#define SST39SF010A 0x00B5
#define SST39SF020A 0x00B6
#define SST39SF040 0x00B7
#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
#define SST49LF030A 0x001C
#define SST49LF040A 0x0051
#define SST49LF080A 0x005B
#define SST36VF3203 0x7354
/* Toshiba */
#define TC58FVT160 0x00C2
#define TC58FVB160 0x0043
#define TC58FVT321 0x009A
#define TC58FVB321 0x009C
#define TC58FVT641 0x0093
#define TC58FVB641 0x0095
/* Winbond */
#define W49V002A 0x00b0
/*
* Unlock address sets for AMD command sets.
* Intel command sets use MTD_UADDR_UNNECESSARY.
* Each identifier, except MTD_UADDR_UNNECESSARY and
* MTD_UADDR_NOT_SUPPORTED, must be defined below in unlock_addrs[].
* MTD_UADDR_NOT_SUPPORTED must be 0 so that structure
* initializers need not spell out the unlock addresses for
* every unsupported bit width.
*/
enum uaddr {
MTD_UADDR_NOT_SUPPORTED = 0, /* data width not supported */
MTD_UADDR_0x0555_0x02AA,
MTD_UADDR_0x0555_0x0AAA,
MTD_UADDR_0x5555_0x2AAA,
MTD_UADDR_0x0AAA_0x0554,
MTD_UADDR_0x0AAA_0x0555,
MTD_UADDR_0xAAAA_0x5555,
MTD_UADDR_DONT_CARE, /* Requires an arbitrary address */
MTD_UADDR_UNNECESSARY, /* Does not require any address */
};
struct unlock_addr {
uint32_t addr1;
uint32_t addr2;
};
/*
* I don't like the fact that the first entry in unlock_addrs[]
* exists, but is for MTD_UADDR_NOT_SUPPORTED - and, therefore,
* should not be used. The problem is that structures with
* initializers have extra fields initialized to 0. It is _very_
* desirable to have the unlock address entries for unsupported
* data widths automatically initialized - that means that
* MTD_UADDR_NOT_SUPPORTED must be 0 and the first entry here
* must go unused.
*/
static const struct unlock_addr unlock_addrs[] = {
[MTD_UADDR_NOT_SUPPORTED] = {
.addr1 = 0xffff,
.addr2 = 0xffff
},
[MTD_UADDR_0x0555_0x02AA] = {
.addr1 = 0x0555,
.addr2 = 0x02aa
},
[MTD_UADDR_0x0555_0x0AAA] = {
.addr1 = 0x0555,
.addr2 = 0x0aaa
},
[MTD_UADDR_0x5555_0x2AAA] = {
.addr1 = 0x5555,
.addr2 = 0x2aaa
},
[MTD_UADDR_0x0AAA_0x0554] = {
.addr1 = 0x0AAA,
.addr2 = 0x0554
},
[MTD_UADDR_0x0AAA_0x0555] = {
.addr1 = 0x0AAA,
.addr2 = 0x0555
},
[MTD_UADDR_0xAAAA_0x5555] = {
.addr1 = 0xaaaa,
.addr2 = 0x5555
},
[MTD_UADDR_DONT_CARE] = {
.addr1 = 0x0000, /* Doesn't matter which address */
.addr2 = 0x0000 /* is used - must be last entry */
},
[MTD_UADDR_UNNECESSARY] = {
.addr1 = 0x0000,
.addr2 = 0x0000
}
};
struct amd_flash_info {
const char *name;
const uint16_t mfr_id;
const uint16_t dev_id;
const uint8_t dev_size;
const uint8_t nr_regions;
const uint16_t cmd_set;
const uint32_t regions[6];
const uint8_t devtypes; /* Bitmask for x8, x16 etc. */
const uint8_t uaddr; /* unlock addrs for 8, 16, 32, 64 */
};
#define ERASEINFO(size, blocks) (((size) << 8) | ((blocks) - 1))
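/*
* ERASEINFO() packs a region descriptor in the CFI EraseRegionInfo
* layout: block size / 256 in the upper 16 bits, block count - 1 in
* the lower 16 bits. E.g. ERASEINFO(0x10000, 64) == 0x0100003f,
* i.e. 64 blocks of 64KiB.
*/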
#define SIZE_64KiB 16
#define SIZE_128KiB 17
#define SIZE_256KiB 18
#define SIZE_512KiB 19
#define SIZE_1MiB 20
#define SIZE_2MiB 21
#define SIZE_4MiB 22
#define SIZE_8MiB 23
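/* dev_size is log2 of the device size in bytes: 1 << SIZE_2MiB == 0x200000 */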
/*
* Please keep this list ordered by manufacturer!
* Fortunately, the list isn't searched often and so a
* slow, linear search isn't so bad.
*/
static const struct amd_flash_info jedec_table[] = {
{
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29F032B,
.name = "AMD AM29F032B",
.uaddr = MTD_UADDR_0x0555_0x02AA,
.devtypes = CFI_DEVICETYPE_X8,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,64)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV160DT,
.name = "AMD AM29LV160DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV160DB,
.name = "AMD AM29LV160DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,31)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV400BB,
.name = "AMD AM29LV400BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,7)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV400BT,
.name = "AMD AM29LV400BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,7),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV800BB,
.name = "AMD AM29LV800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,15),
}
}, {
/* add DL */
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29DL800BB,
.name = "AMD AM29DL800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 6,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,4),
ERASEINFO(0x08000,1),
ERASEINFO(0x04000,1),
ERASEINFO(0x10000,14)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29DL800BT,
.name = "AMD AM29DL800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 6,
.regions = {
ERASEINFO(0x10000,14),
ERASEINFO(0x04000,1),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,4),
ERASEINFO(0x08000,1),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29F800BB,
.name = "AMD AM29F800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,15),
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV800BT,
.name = "AMD AM29LV800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29F800BT,
.name = "AMD AM29F800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29F017D,
.name = "AMD AM29F017D",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_DONT_CARE,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29F016D,
.name = "AMD AM29F016D",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29F080,
.name = "AMD AM29F080",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29F040,
.name = "AMD AM29F040",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV040B,
.name = "AMD AM29LV040B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29F002T,
.name = "AMD AM29F002T",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1),
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29SL800DT,
.name = "AMD AM29SL800DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1),
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29SL800DB,
.name = "AMD AM29SL800DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,15),
}
}, {
.mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV512,
.name = "Atmel AT49BV512",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_64KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,1)
}
}, {
.mfr_id = CFI_MFR_ATMEL,
.dev_id = AT29LV512,
.name = "Atmel AT29LV512",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_64KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x80,256),
ERASEINFO(0x80,256)
}
}, {
.mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV16X,
.name = "Atmel AT49BV16X",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,31)
}
}, {
.mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV16XT,
.name = "Atmel AT49BV16XT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x02000,8)
}
}, {
.mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV32X,
.name = "Atmel AT49BV32X",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,63)
}
}, {
.mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV32XT,
.name = "Atmel AT49BV32XT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000,63),
ERASEINFO(0x02000,8)
}
}, {
.mfr_id = CFI_MFR_EON,
.dev_id = EN29LV400AT,
.name = "Eon EN29LV400AT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,7),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1),
}
}, {
.mfr_id = CFI_MFR_EON,
.dev_id = EN29LV400AB,
.name = "Eon EN29LV400AB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,7),
}
}, {
.mfr_id = CFI_MFR_EON,
.dev_id = EN29SL800BT,
.name = "Eon EN29SL800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1),
}
}, {
.mfr_id = CFI_MFR_EON,
.dev_id = EN29SL800BB,
.name = "Eon EN29SL800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,15),
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29F040C,
.name = "Fujitsu MBM29F040C",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29F800BA,
.name = "Fujitsu MBM29F800BA",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,15),
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV650UE,
.name = "Fujitsu MBM29LV650UE",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_DONT_CARE,
.dev_size = SIZE_8MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,128)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV320TE,
.name = "Fujitsu MBM29LV320TE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000,63),
ERASEINFO(0x02000,8)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV320BE,
.name = "Fujitsu MBM29LV320BE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,63)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV160TE,
.name = "Fujitsu MBM29LV160TE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV160BE,
.name = "Fujitsu MBM29LV160BE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,31)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV800BA,
.name = "Fujitsu MBM29LV800BA",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,15)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV800TA,
.name = "Fujitsu MBM29LV800TA",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV400BC,
.name = "Fujitsu MBM29LV400BC",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,7)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV400TC,
.name = "Fujitsu MBM29LV400TC",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,7),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_HYUNDAI,
.dev_id = HY29F002T,
.name = "Hyundai HY29F002T",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F004B3B,
.name = "Intel 28F004B3B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 7),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F004B3T,
.name = "Intel 28F004B3T",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 7),
ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F400B3B,
.name = "Intel 28F400B3B",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 7),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F400B3T,
.name = "Intel 28F400B3T",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 7),
ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008B3B,
.name = "Intel 28F008B3B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 15),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008B3T,
.name = "Intel 28F008B3T",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 15),
ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008S5,
.name = "Intel 28F008S5",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016S5,
.name = "Intel 28F016S5",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008SA,
.name = "Intel 28F008SA",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000, 16),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F800B3B,
.name = "Intel 28F800B3B",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 15),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F800B3T,
.name = "Intel 28F800B3T",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 15),
ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016B3B,
.name = "Intel 28F016B3B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 31),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016S3,
.name = "Intel I28F016S3",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000, 32),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016B3T,
.name = "Intel 28F016B3T",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 31),
ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F160B3B,
.name = "Intel 28F160B3B",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 31),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F160B3T,
.name = "Intel 28F160B3T",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 31),
ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F320B3B,
.name = "Intel 28F320B3B",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 63),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F320B3T,
.name = "Intel 28F320B3T",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 63),
ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F640B3B,
.name = "Intel 28F640B3B",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_8MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 127),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F640B3T,
.name = "Intel 28F640B3T",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_8MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 127),
ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F640C3B,
.name = "Intel 28F640C3B",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_8MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 127),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I82802AB,
.name = "Intel 82802AB",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I82802AC,
.name = "Intel 82802AC",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
}, {
.mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29LV040C,
.name = "Macronix MX29LV040C",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
}, {
.mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29LV160T,
.name = "MXIC MX29LV160T",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_NEC,
.dev_id = UPD29F064115,
.name = "NEC uPD29F064115",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_0xAAAA_0x5555,
.dev_size = SIZE_8MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 3,
.regions = {
ERASEINFO(0x2000,8),
ERASEINFO(0x10000,126),
ERASEINFO(0x2000,8),
}
}, {
.mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29LV160B,
.name = "MXIC MX29LV160B",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,31)
}
}, {
.mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F040,
.name = "Macronix MX29F040",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
}, {
.mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F016,
.name = "Macronix MX29F016",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
}, {
.mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F004T,
.name = "Macronix MX29F004T",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,7),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1),
}
}, {
.mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F004B,
.name = "Macronix MX29F004B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,7),
}
}, {
.mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F002T,
.name = "Macronix MX29F002T",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1),
}
}, {
.mfr_id = CFI_MFR_PMC,
.dev_id = PM49FL002,
.name = "PMC Pm49FL002",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO( 0x01000, 64 )
}
}, {
.mfr_id = CFI_MFR_PMC,
.dev_id = PM49FL004,
.name = "PMC Pm49FL004",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO( 0x01000, 128 )
}
}, {
.mfr_id = CFI_MFR_PMC,
.dev_id = PM49FL008,
.name = "PMC Pm49FL008",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO( 0x01000, 256 )
}
}, {
.mfr_id = CFI_MFR_SHARP,
.dev_id = LH28F640BF,
.name = "LH28F640BF",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_8MiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 127),
ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST39LF512,
.name = "SST 39LF512",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_64KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,16),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST39LF010,
.name = "SST 39LF010",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_128KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,32),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST29EE020,
.name = "SST 29EE020",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_SST_PAGE,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,64),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST29LE020,
.name = "SST 29LE020",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_SST_PAGE,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,64),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST39LF020,
.name = "SST 39LF020",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,64),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST39LF040,
.name = "SST 39LF040",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,128),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST39SF010A,
.name = "SST 39SF010A",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_128KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,32),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST39SF020A,
.name = "SST 39SF020A",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,64),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST39SF040,
.name = "SST 39SF040",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,128),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST49LF040B,
.name = "SST 49LF040B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,128),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST49LF004B,
.name = "SST 49LF004B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,128),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST49LF008A,
.name = "SST 49LF008A",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,256),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST49LF030A,
.name = "SST 49LF030A",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,96),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST49LF040A,
.name = "SST 49LF040A",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,128),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST49LF080A,
.name = "SST 49LF080A",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,256),
}
}, {
.mfr_id = CFI_MFR_SST, /* should be CFI */
.dev_id = SST39LF160,
.name = "SST 39LF160",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_0xAAAA_0x5555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x1000,256),
ERASEINFO(0x1000,256)
}
}, {
.mfr_id = CFI_MFR_SST, /* should be CFI */
.dev_id = SST39VF1601,
.name = "SST 39VF1601",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_0xAAAA_0x5555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x1000,256),
ERASEINFO(0x1000,256)
}
}, {
/* CFI is broken: reports AMD_STD, but needs custom uaddr */
.mfr_id = CFI_MFR_SST,
.dev_id = SST39WF1601,
.name = "SST 39WF1601",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_0xAAAA_0x5555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x1000,256),
ERASEINFO(0x1000,256)
}
}, {
/* CFI is broken: reports AMD_STD, but needs custom uaddr */
.mfr_id = CFI_MFR_SST,
.dev_id = SST39WF1602,
.name = "SST 39WF1602",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_0xAAAA_0x5555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x1000,256),
ERASEINFO(0x1000,256)
}
}, {
.mfr_id = CFI_MFR_SST, /* should be CFI */
.dev_id = SST39VF3201,
.name = "SST 39VF3201",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_0xAAAA_0x5555,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x1000,256),
ERASEINFO(0x1000,256),
ERASEINFO(0x1000,256),
ERASEINFO(0x1000,256)
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST36VF3203,
.name = "SST 36VF3203",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,64),
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M29F800AB,
.name = "ST M29F800AB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,15),
}
}, {
.mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W800DT,
.name = "ST M29W800DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W800DB,
.name = "ST M29W800DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,15)
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M29W400DT,
.name = "ST M29W400DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,7),
ERASEINFO(0x02000,1),
ERASEINFO(0x08000,2),
ERASEINFO(0x10000,1)
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M29W400DB,
.name = "ST M29W400DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,7)
}
}, {
.mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W160DT,
.name = "ST M29W160DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W160DB,
.name = "ST M29W160DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,31)
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M29W040B,
.name = "ST M29W040B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M50FW040,
.name = "ST M50FW040",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M50FW080,
.name = "ST M50FW080",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M50FW016,
.name = "ST M50FW016",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M50LPW080,
.name = "ST M50LPW080",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
},
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M50FLW080A,
.name = "ST M50FLW080A",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 4,
.regions = {
ERASEINFO(0x1000,16),
ERASEINFO(0x10000,13),
ERASEINFO(0x1000,16),
ERASEINFO(0x1000,16),
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M50FLW080B,
.name = "ST M50FLW080B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 4,
.regions = {
ERASEINFO(0x1000,16),
ERASEINFO(0x1000,16),
ERASEINFO(0x10000,13),
ERASEINFO(0x1000,16),
}
}, {
.mfr_id = 0xff00 | CFI_MFR_ST,
.dev_id = 0xff00 | PSD4256G6V,
.name = "ST PSD4256G6V",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_0x0AAA_0x0554,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
}, {
.mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVT160,
.name = "Toshiba TC58FVT160",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVB160,
.name = "Toshiba TC58FVB160",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,31)
}
}, {
.mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVB321,
.name = "Toshiba TC58FVB321",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,63)
}
}, {
.mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVT321,
.name = "Toshiba TC58FVT321",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000,63),
ERASEINFO(0x02000,8)
}
}, {
.mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVB641,
.name = "Toshiba TC58FVB641",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_8MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,127)
}
}, {
.mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVT641,
.name = "Toshiba TC58FVT641",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_8MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000,127),
ERASEINFO(0x02000,8)
}
}, {
.mfr_id = CFI_MFR_WINBOND,
.dev_id = W49V002A,
.name = "Winbond W49V002A",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000, 3),
ERASEINFO(0x08000, 1),
ERASEINFO(0x02000, 2),
ERASEINFO(0x04000, 1),
}
}
};
static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
struct cfi_private *cfi)
{
map_word result;
unsigned long mask;
int bank = 0;
/* According to JEDEC "Standard Manufacturer's Identification Code"
* (http://www.jedec.org/download/search/jep106W.pdf)
* the first several banks can contain the continuation code 0x7f
* instead of the actual manufacturer ID.
*/
do {
uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
mask = (1 << (cfi->device_type * 8)) - 1;
if (ofs >= map->size)
return 0;
result = map_read(map, base + ofs);
bank++;
} while ((result.x[0] & mask) == CFI_MFR_CONTINUATION);
return result.x[0] & mask;
}
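/*
* Example: a manufacturer listed in a later JEP106 bank answers with
* the continuation code 0x7f at bank 0, so the loop above retries one
* bank (0x100 device words) higher until the real code shows up.
*/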
static inline u32 jedec_read_id(struct map_info *map, uint32_t base,
struct cfi_private *cfi)
{
map_word result;
unsigned long mask;
u32 ofs = cfi_build_cmd_addr(1, map, cfi);
mask = (1 << (cfi->device_type * 8)) -1;
result = map_read(map, base + ofs);
return result.x[0] & mask;
}
static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
{
/* Reset */
/* After checking the datasheets for SST, MACRONIX and ATMEL
* (and, incidentally, the JEDEC spec - 3.5.3.3), the reset
* sequence is *supposed* to be 0xaa at 0x5555, 0x55 at
* 0x2aaa, 0xF0 at 0x5555. This will not affect AMD chips,
* as they ignore the writes and don't care what address
* the 0xF0 is written to. */
if (cfi->addr_unlock1) {
pr_debug( "reset unlock called %x %x \n",
cfi->addr_unlock1,cfi->addr_unlock2);
cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
}
cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
/* Some misdesigned Intel chips do not respond to 0xF0 as a reset,
* so ensure we're in read mode by sending the Intel read-array
* command as well. Intel uses 0xff for this, and AMD treats 0xff
* as a NOP, so this should be safe.
*/
cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
/* FIXME - should have reset delay before continuing */
}
static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int index)
{
int i,num_erase_regions;
uint8_t uaddr;
if (!(jedec_table[index].devtypes & cfi->device_type)) {
pr_debug("Rejecting potential %s with incompatible %d-bit device type\n",
jedec_table[index].name, 4 * (1<<cfi->device_type));
return 0;
}
printk(KERN_INFO "Found: %s\n",jedec_table[index].name);
num_erase_regions = jedec_table[index].nr_regions;
cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
if (!cfi->cfiq) {
//xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
return 0;
}
memset(cfi->cfiq, 0, sizeof(struct cfi_ident));
cfi->cfiq->P_ID = jedec_table[index].cmd_set;
cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions;
cfi->cfiq->DevSize = jedec_table[index].dev_size;
cfi->cfi_mode = CFI_MODE_JEDEC;
cfi->sector_erase_cmd = CMD(0x30);
for (i=0; i<num_erase_regions; i++){
cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i];
}
cfi->cmdset_priv = NULL;
/* This may be redundant for some cases, but it doesn't hurt */
cfi->mfr = jedec_table[index].mfr_id;
cfi->id = jedec_table[index].dev_id;
uaddr = jedec_table[index].uaddr;
/* The table has unlock addresses in _bytes_, and we try not to let
our brains explode when we see the datasheets talking about address
lines numbered from A-1 to A18. The CFI table has unlock addresses
in device-words according to the mode the device is connected in */
cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / cfi->device_type;
cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / cfi->device_type;
return 1; /* ok */
}
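/*
* Example of the byte to device-word conversion above: the table entry
* MTD_UADDR_0x0AAA_0x0555 holds byte addresses, so on an x16 part
* (cfi->device_type == 2) the probe ends up using addr_unlock1 = 0x555
* and addr_unlock2 = 0x2aa.
*/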
/*
* Properly identifying the JEDEC device and guaranteeing the mapped
* address, unlock addresses, and chip ID is a BIG problem. This function
* attempts to minimize errors. It is doubtful that this probe will ever
* be perfect - consequently there should be module parameters that
* could be manually specified to force the chip info.
*/
static inline int jedec_match( uint32_t base,
struct map_info *map,
struct cfi_private *cfi,
const struct amd_flash_info *finfo )
{
int rc = 0; /* failure until all tests pass */
u32 mfr, id;
uint8_t uaddr;
/*
* The IDs must match. For X16 and X32 devices operating in
* a lower width (X8 or X16), the device IDs are usually just
* the lower byte(s) of the larger device ID for wider mode. If
* a part is found that doesn't fit this assumption (its device ID
* in the smaller-width mode is completely unrelated to the
* full-width one) then jedec_table[] will have to be augmented
* with the IDs for the different widths.
*/
switch (cfi->device_type) {
case CFI_DEVICETYPE_X8:
mfr = (uint8_t)finfo->mfr_id;
id = (uint8_t)finfo->dev_id;
/* bjd: it seems that if the full device ID doesn't fit in 8 bits,
* truncating it here can make a 16-bit flash look like an 8-bit
* device, even though it isn't.
*/
if (finfo->dev_id > 0xff) {
pr_debug("%s(): ID is not 8bit\n",
__func__);
goto match_done;
}
break;
case CFI_DEVICETYPE_X16:
mfr = (uint16_t)finfo->mfr_id;
id = (uint16_t)finfo->dev_id;
break;
case CFI_DEVICETYPE_X32:
mfr = (uint16_t)finfo->mfr_id;
id = (uint32_t)finfo->dev_id;
break;
default:
printk(KERN_WARNING
"MTD %s(): Unsupported device type %d\n",
__func__, cfi->device_type);
goto match_done;
}
if ( cfi->mfr != mfr || cfi->id != id ) {
goto match_done;
}
/* the part size must fit in the memory window */
pr_debug("MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
__func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) );
if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) {
pr_debug("MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
__func__, finfo->mfr_id, finfo->dev_id,
1 << finfo->dev_size );
goto match_done;
}
if (! (finfo->devtypes & cfi->device_type))
goto match_done;
uaddr = finfo->uaddr;
pr_debug("MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
__func__, cfi->addr_unlock1, cfi->addr_unlock2 );
if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
&& ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 ||
unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) {
pr_debug("MTD %s(): 0x%.4x 0x%.4x did not match\n",
__func__,
unlock_addrs[uaddr].addr1,
unlock_addrs[uaddr].addr2);
goto match_done;
}
/*
* Make sure the IDs disappear when the device is taken out of
* ID mode. The only case where this check wrongly fails for a
* genuine chip is when the IDs happen to be written as data at
* the same addresses. For this rare and unfortunate case the
* chip cannot be probed correctly.
* FIXME - write a driver that takes all of the chip info as
* module parameters, doesn't probe but forces a load.
*/
pr_debug("MTD %s(): check ID's disappear when not in ID mode\n",
__func__ );
jedec_reset( base, map, cfi );
mfr = jedec_read_mfr( map, base, cfi );
id = jedec_read_id( map, base, cfi );
if ( mfr == cfi->mfr && id == cfi->id ) {
pr_debug("MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n"
"You might need to manually specify JEDEC parameters.\n",
__func__, cfi->mfr, cfi->id );
goto match_done;
}
/* all tests passed - mark as success */
rc = 1;
/*
* Put the device back in ID mode - only need to do this if we
* were truly frobbing a real device.
*/
pr_debug("MTD %s(): return to ID mode\n", __func__ );
if (cfi->addr_unlock1) {
cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
}
cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
/* FIXME - should have a delay before continuing */
match_done:
return rc;
}
static int jedec_probe_chip(struct map_info *map, __u32 base,
unsigned long *chip_map, struct cfi_private *cfi)
{
int i;
enum uaddr uaddr_idx = MTD_UADDR_NOT_SUPPORTED;
u32 probe_offset1, probe_offset2;
retry:
if (!cfi->numchips) {
uaddr_idx++;
if (MTD_UADDR_UNNECESSARY == uaddr_idx)
return 0;
cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1 / cfi->device_type;
cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2 / cfi->device_type;
}
/* Make certain we aren't probing past the end of map */
if (base >= map->size) {
printk(KERN_NOTICE
"Probe at base(0x%08x) past the end of the map(0x%08lx)\n",
base, map->size -1);
return 0;
}
/* Ensure the unlock addresses we try stay inside the map */
probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, map, cfi);
probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, map, cfi);
if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
((base + probe_offset2 + map_bankwidth(map)) >= map->size))
goto retry;
/* Reset */
jedec_reset(base, map, cfi);
/* Autoselect Mode */
if(cfi->addr_unlock1) {
cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
}
cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
/* FIXME - should have a delay before continuing */
if (!cfi->numchips) {
/* This is the first time we're called. Set up the CFI
stuff accordingly and return */
cfi->mfr = jedec_read_mfr(map, base, cfi);
cfi->id = jedec_read_id(map, base, cfi);
pr_debug("Search for id:(%02x %02x) interleave(%d) type(%d)\n",
cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type);
for (i = 0; i < ARRAY_SIZE(jedec_table); i++) {
if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
pr_debug("MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
__func__, cfi->mfr, cfi->id,
cfi->addr_unlock1, cfi->addr_unlock2 );
if (!cfi_jedec_setup(map, cfi, i))
return 0;
goto ok_out;
}
}
goto retry;
} else {
uint16_t mfr;
uint16_t id;
/* Make sure it is a chip of the same manufacturer and id */
mfr = jedec_read_mfr(map, base, cfi);
id = jedec_read_id(map, base, cfi);
if ((mfr != cfi->mfr) || (id != cfi->id)) {
printk(KERN_DEBUG "%s: Found different chip or no chip at all (mfr 0x%x, id 0x%x) at 0x%x\n",
map->name, mfr, id, base);
jedec_reset(base, map, cfi);
return 0;
}
}
/* Check each previous chip location to see if it holds an alias */
for (i=0; i < (base >> cfi->chipshift); i++) {
unsigned long start;
if(!test_bit(i, chip_map)) {
continue; /* Skip location; no valid chip at this address */
}
start = i << cfi->chipshift;
if (jedec_read_mfr(map, start, cfi) == cfi->mfr &&
jedec_read_id(map, start, cfi) == cfi->id) {
/* Eep. This chip also looks like it's in autoselect mode.
Is it an alias for the new one? */
jedec_reset(start, map, cfi);
/* If the device IDs go away, it's an alias */
if (jedec_read_mfr(map, base, cfi) != cfi->mfr ||
jedec_read_id(map, base, cfi) != cfi->id) {
printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
map->name, base, start);
return 0;
}
/* Yes, it's actually got the device IDs as data. Most
* unfortunate. Stick the new chip in read mode
* too and if it's the same, assume it's an alias. */
/* FIXME: Use other modes to do a proper check */
jedec_reset(base, map, cfi);
if (jedec_read_mfr(map, base, cfi) == cfi->mfr &&
jedec_read_id(map, base, cfi) == cfi->id) {
printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
map->name, base, start);
return 0;
}
}
}
/* OK, if we got to here, then none of the previous chips appear to
be aliases for the current one. */
set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
cfi->numchips++;
ok_out:
/* Put it back into Read Mode */
jedec_reset(base, map, cfi);
printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
map->name, cfi_interleave(cfi), cfi->device_type*8, base,
map->bankwidth*8);
return 1;
}
static struct chip_probe jedec_chip_probe = {
.name = "JEDEC",
.probe_chip = jedec_probe_chip
};
static struct mtd_info *jedec_probe(struct map_info *map)
{
/*
* Just use the generic probe stuff to call our CFI-specific
* chip_probe routine in all the possible permutations, etc.
*/
return mtd_do_chip_probe(map, &jedec_chip_probe);
}
static struct mtd_chip_driver jedec_chipdrv = {
.probe = jedec_probe,
.name = "jedec_probe",
.module = THIS_MODULE
};
static int __init jedec_probe_init(void)
{
register_mtd_chip_driver(&jedec_chipdrv);
return 0;
}
static void __exit jedec_probe_exit(void)
{
unregister_mtd_chip_driver(&jedec_chipdrv);
}
module_init(jedec_probe_init);
module_exit(jedec_probe_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Erwin Authried <[email protected]> et al.");
MODULE_DESCRIPTION("Probe code for JEDEC-compliant flash chips");
| linux-master | drivers/mtd/chips/jedec_probe.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Routines common to all CFI-type probes.
* (C) 2001-2003 Red Hat, Inc.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/gen_probe.h>
static struct mtd_info *check_cmd_set(struct map_info *, int);
static struct cfi_private *genprobe_ident_chips(struct map_info *map,
struct chip_probe *cp);
static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
struct cfi_private *cfi);
struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp)
{
struct mtd_info *mtd;
struct cfi_private *cfi;
/* First probe the map to see if we have CFI stuff there. */
cfi = genprobe_ident_chips(map, cp);
if (!cfi)
return NULL;
map->fldrv_priv = cfi;
/* OK we liked it. Now find a driver for the command set it talks */
mtd = check_cmd_set(map, 1); /* First the primary cmdset */
if (!mtd)
mtd = check_cmd_set(map, 0); /* Then the secondary */
if (mtd) {
if (mtd->size > map->size) {
printk(KERN_WARNING "Reducing visibility of %ldKiB chip to %ldKiB\n",
(unsigned long)mtd->size >> 10,
(unsigned long)map->size >> 10);
mtd->size = map->size;
}
return mtd;
}
printk(KERN_WARNING"gen_probe: No supported Vendor Command Set found\n");
kfree(cfi->cfiq);
kfree(cfi);
map->fldrv_priv = NULL;
return NULL;
}
EXPORT_SYMBOL(mtd_do_chip_probe);
static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chip_probe *cp)
{
struct cfi_private cfi;
struct cfi_private *retcfi;
unsigned long *chip_map;
int max_chips;
int i, j;
memset(&cfi, 0, sizeof(cfi));
/* Call the probetype-specific code with all permutations of
interleave and device type, etc. */
if (!genprobe_new_chip(map, cp, &cfi)) {
/* The probe didn't like it */
pr_debug("%s: Found no %s device at location zero\n",
cp->name, map->name);
return NULL;
}
#if 0 /* Let the CFI probe routine do this sanity check. The Intel and AMD
probe routines won't ever return a broken CFI structure anyway,
because they make them up themselves.
*/
if (cfi.cfiq->NumEraseRegions == 0) {
printk(KERN_WARNING "Number of erase regions is zero\n");
kfree(cfi.cfiq);
return NULL;
}
#endif
cfi.chipshift = cfi.cfiq->DevSize;
if (cfi_interleave_is_1(&cfi)) {
;
} else if (cfi_interleave_is_2(&cfi)) {
cfi.chipshift++;
} else if (cfi_interleave_is_4(&cfi)) {
cfi.chipshift += 2;
} else if (cfi_interleave_is_8(&cfi)) {
cfi.chipshift += 3;
} else {
BUG();
}
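/* Worked example: a 2 MiB part (DevSize == 21) interleaved 4 ways
* occupies 8 MiB of the map, so chipshift becomes 21 + 2 == 23. */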
cfi.numchips = 1;
/*
* Allocate memory for bitmap of valid chips.
* Align bitmap storage size to full byte.
*/
max_chips = map->size >> cfi.chipshift;
if (!max_chips) {
printk(KERN_WARNING "NOR chip too large to fit in mapping. Attempting to cope...\n");
max_chips = 1;
}
chip_map = bitmap_zalloc(max_chips, GFP_KERNEL);
if (!chip_map) {
kfree(cfi.cfiq);
return NULL;
}
set_bit(0, chip_map); /* Mark first chip valid */
/*
* Now probe for other chips, checking sensibly for aliases while
* we're at it. The new_chip probe above should have left the first
* chip in read mode.
*/
for (i = 1; i < max_chips; i++) {
cp->probe_chip(map, i << cfi.chipshift, chip_map, &cfi);
}
/*
* Now allocate the space for the structures we need to return to
* our caller, and copy the appropriate data into them.
*/
retcfi = kmalloc(struct_size(retcfi, chips, cfi.numchips), GFP_KERNEL);
if (!retcfi) {
kfree(cfi.cfiq);
bitmap_free(chip_map);
return NULL;
}
memcpy(retcfi, &cfi, sizeof(cfi));
memset(&retcfi->chips[0], 0, sizeof(struct flchip) * cfi.numchips);
for (i = 0, j = 0; (j < cfi.numchips) && (i < max_chips); i++) {
if(test_bit(i, chip_map)) {
struct flchip *pchip = &retcfi->chips[j++];
pchip->start = (i << cfi.chipshift);
pchip->state = FL_READY;
init_waitqueue_head(&pchip->wq);
mutex_init(&pchip->mutex);
}
}
bitmap_free(chip_map);
return retcfi;
}
static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
struct cfi_private *cfi)
{
int min_chips = (map_bankwidth(map)/4?:1); /* A chip is at most 4 bytes wide... */
int max_chips = map_bankwidth(map); /* ...and at least 1 byte wide */
int nr_chips, type;
for (nr_chips = max_chips; nr_chips >= min_chips; nr_chips >>= 1) {
if (!cfi_interleave_supported(nr_chips))
continue;
cfi->interleave = nr_chips;
/* Minimum device size. Don't look for one 8-bit device
in a 16-bit bus, etc. */
type = map_bankwidth(map) / nr_chips;
for (; type <= CFI_DEVICETYPE_X32; type<<=1) {
cfi->device_type = type;
if (cp->probe_chip(map, 0, NULL, cfi))
return 1;
}
}
return 0;
}
typedef struct mtd_info *cfi_cmdset_fn_t(struct map_info *, int);
extern cfi_cmdset_fn_t cfi_cmdset_0001;
extern cfi_cmdset_fn_t cfi_cmdset_0002;
extern cfi_cmdset_fn_t cfi_cmdset_0020;
static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
int primary)
{
struct cfi_private *cfi = map->fldrv_priv;
__u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
#ifdef CONFIG_MODULES
cfi_cmdset_fn_t *probe_function;
char *probename;
probename = kasprintf(GFP_KERNEL, "cfi_cmdset_%4.4X", type);
if (!probename)
return NULL;
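/* For example, a command set ID of 0x0002 yields the name
* "cfi_cmdset_0002", which matches the AMD/Fujitsu standard
* command set driver. */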
probe_function = __symbol_get(probename);
if (!probe_function) {
request_module("cfi_cmdset_%4.4X", type);
probe_function = __symbol_get(probename);
}
kfree(probename);
if (probe_function) {
struct mtd_info *mtd;
mtd = (*probe_function)(map, primary);
/* If it was happy, it'll have increased its own use count */
symbol_put_addr(probe_function);
return mtd;
}
#endif
printk(KERN_NOTICE "Support for command set %04X not present\n", type);
return NULL;
}
static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
{
struct cfi_private *cfi = map->fldrv_priv;
__u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
if (type == P_ID_NONE || type == P_ID_RESERVED)
return NULL;
switch(type){
/* We need these for the !CONFIG_MODULES case,
because symbol_get() doesn't work there */
#ifdef CONFIG_MTD_CFI_INTELEXT
case P_ID_INTEL_EXT:
case P_ID_INTEL_STD:
case P_ID_INTEL_PERFORMANCE:
return cfi_cmdset_0001(map, primary);
#endif
#ifdef CONFIG_MTD_CFI_AMDSTD
case P_ID_AMD_STD:
case P_ID_SST_OLD:
case P_ID_WINBOND:
return cfi_cmdset_0002(map, primary);
#endif
#ifdef CONFIG_MTD_CFI_STAA
case P_ID_ST_ADV:
return cfi_cmdset_0020(map, primary);
#endif
default:
return cfi_cmdset_unknown(map, primary);
}
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <[email protected]>");
MODULE_DESCRIPTION("Helper routines for flash chip probe code");
| linux-master | drivers/mtd/chips/gen_probe.c |
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/mtd/nand-ecc-sw-hamming.h>
#include "mtd_test.h"
/*
* Test the implementation for software ECC
*
* No actual MTD device is needed, so we don't need to worry about losing
* important data through human error.
*
* This covers possible patterns of corruption which can be reliably corrected
* or detected.
*/
#if IS_ENABLED(CONFIG_MTD_RAW_NAND)
struct nand_ecc_test {
const char *name;
void (*prepare)(void *, void *, void *, void *, const size_t);
int (*verify)(void *, void *, void *, const size_t);
};
/*
* The reason for using __change_bit_le() instead of __change_bit() is to
* properly inject a bit error within a region whose size is not a multiple
* of sizeof(unsigned long) on big-endian systems.
*/
#ifdef __LITTLE_ENDIAN
#define __change_bit_le(nr, addr) __change_bit(nr, addr)
#elif defined(__BIG_ENDIAN)
#define __change_bit_le(nr, addr) \
__change_bit((nr) ^ ((BITS_PER_LONG - 1) & ~0x7), addr)
#else
#error "Unknown byte order"
#endif
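/*
* Worked example, assuming BITS_PER_LONG == 64: the XOR mask is
* (64 - 1) & ~0x7 == 56, so bit 0 maps to bit 56, bit 8 to bit 48,
* and so on - the byte order within each long is mirrored while the
* bit position inside each byte is preserved.
*/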
static void single_bit_error_data(void *error_data, void *correct_data,
size_t size)
{
unsigned int offset = get_random_u32_below(size * BITS_PER_BYTE);
memcpy(error_data, correct_data, size);
__change_bit_le(offset, error_data);
}
static void double_bit_error_data(void *error_data, void *correct_data,
size_t size)
{
unsigned int offset[2];
offset[0] = get_random_u32_below(size * BITS_PER_BYTE);
do {
offset[1] = get_random_u32_below(size * BITS_PER_BYTE);
} while (offset[0] == offset[1]);
memcpy(error_data, correct_data, size);
__change_bit_le(offset[0], error_data);
__change_bit_le(offset[1], error_data);
}
static unsigned int random_ecc_bit(size_t size)
{
unsigned int offset = get_random_u32_below(3 * BITS_PER_BYTE);
if (size == 256) {
/*
* Don't inject a bit error into the insignificant bits (16th
* and 17th bit) in ECC code for 256 byte data block
*/
while (offset == 16 || offset == 17)
offset = get_random_u32_below(3 * BITS_PER_BYTE);
}
return offset;
}
static void single_bit_error_ecc(void *error_ecc, void *correct_ecc,
size_t size)
{
unsigned int offset = random_ecc_bit(size);
memcpy(error_ecc, correct_ecc, 3);
__change_bit_le(offset, error_ecc);
}
static void double_bit_error_ecc(void *error_ecc, void *correct_ecc,
size_t size)
{
unsigned int offset[2];
offset[0] = random_ecc_bit(size);
do {
offset[1] = random_ecc_bit(size);
} while (offset[0] == offset[1]);
memcpy(error_ecc, correct_ecc, 3);
__change_bit_le(offset[0], error_ecc);
__change_bit_le(offset[1], error_ecc);
}
static void no_bit_error(void *error_data, void *error_ecc,
void *correct_data, void *correct_ecc, const size_t size)
{
memcpy(error_data, correct_data, size);
memcpy(error_ecc, correct_ecc, 3);
}
static int no_bit_error_verify(void *error_data, void *error_ecc,
void *correct_data, const size_t size)
{
bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
unsigned char calc_ecc[3];
int ret;
ecc_sw_hamming_calculate(error_data, size, calc_ecc, sm_order);
ret = ecc_sw_hamming_correct(error_data, error_ecc, calc_ecc, size,
sm_order);
if (ret == 0 && !memcmp(correct_data, error_data, size))
return 0;
return -EINVAL;
}
static void single_bit_error_in_data(void *error_data, void *error_ecc,
void *correct_data, void *correct_ecc, const size_t size)
{
single_bit_error_data(error_data, correct_data, size);
memcpy(error_ecc, correct_ecc, 3);
}
static void single_bit_error_in_ecc(void *error_data, void *error_ecc,
void *correct_data, void *correct_ecc, const size_t size)
{
memcpy(error_data, correct_data, size);
single_bit_error_ecc(error_ecc, correct_ecc, size);
}
static int single_bit_error_correct(void *error_data, void *error_ecc,
void *correct_data, const size_t size)
{
bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
unsigned char calc_ecc[3];
int ret;
ecc_sw_hamming_calculate(error_data, size, calc_ecc, sm_order);
ret = ecc_sw_hamming_correct(error_data, error_ecc, calc_ecc, size,
sm_order);
if (ret == 1 && !memcmp(correct_data, error_data, size))
return 0;
return -EINVAL;
}
static void double_bit_error_in_data(void *error_data, void *error_ecc,
void *correct_data, void *correct_ecc, const size_t size)
{
double_bit_error_data(error_data, correct_data, size);
memcpy(error_ecc, correct_ecc, 3);
}
static void single_bit_error_in_data_and_ecc(void *error_data, void *error_ecc,
void *correct_data, void *correct_ecc, const size_t size)
{
single_bit_error_data(error_data, correct_data, size);
single_bit_error_ecc(error_ecc, correct_ecc, size);
}
static void double_bit_error_in_ecc(void *error_data, void *error_ecc,
void *correct_data, void *correct_ecc, const size_t size)
{
memcpy(error_data, correct_data, size);
double_bit_error_ecc(error_ecc, correct_ecc, size);
}
static int double_bit_error_detect(void *error_data, void *error_ecc,
void *correct_data, const size_t size)
{
bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
unsigned char calc_ecc[3];
int ret;
ecc_sw_hamming_calculate(error_data, size, calc_ecc, sm_order);
ret = ecc_sw_hamming_correct(error_data, error_ecc, calc_ecc, size,
sm_order);
return (ret == -EBADMSG) ? 0 : -EINVAL;
}
static const struct nand_ecc_test nand_ecc_test[] = {
{
.name = "no-bit-error",
.prepare = no_bit_error,
.verify = no_bit_error_verify,
},
{
.name = "single-bit-error-in-data-correct",
.prepare = single_bit_error_in_data,
.verify = single_bit_error_correct,
},
{
.name = "single-bit-error-in-ecc-correct",
.prepare = single_bit_error_in_ecc,
.verify = single_bit_error_correct,
},
{
.name = "double-bit-error-in-data-detect",
.prepare = double_bit_error_in_data,
.verify = double_bit_error_detect,
},
{
.name = "single-bit-error-in-data-and-ecc-detect",
.prepare = single_bit_error_in_data_and_ecc,
.verify = double_bit_error_detect,
},
{
.name = "double-bit-error-in-ecc-detect",
.prepare = double_bit_error_in_ecc,
.verify = double_bit_error_detect,
},
};
static void dump_data_ecc(void *error_data, void *error_ecc, void *correct_data,
void *correct_ecc, const size_t size)
{
pr_info("hexdump of error data:\n");
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
error_data, size, false);
print_hex_dump(KERN_INFO, "hexdump of error ecc: ",
DUMP_PREFIX_NONE, 16, 1, error_ecc, 3, false);
pr_info("hexdump of correct data:\n");
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
correct_data, size, false);
print_hex_dump(KERN_INFO, "hexdump of correct ecc: ",
DUMP_PREFIX_NONE, 16, 1, correct_ecc, 3, false);
}
static int nand_ecc_test_run(const size_t size)
{
bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
int i;
int err = 0;
void *error_data;
void *error_ecc;
void *correct_data;
void *correct_ecc;
error_data = kmalloc(size, GFP_KERNEL);
error_ecc = kmalloc(3, GFP_KERNEL);
correct_data = kmalloc(size, GFP_KERNEL);
correct_ecc = kmalloc(3, GFP_KERNEL);
if (!error_data || !error_ecc || !correct_data || !correct_ecc) {
err = -ENOMEM;
goto error;
}
get_random_bytes(correct_data, size);
ecc_sw_hamming_calculate(correct_data, size, correct_ecc, sm_order);
for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) {
nand_ecc_test[i].prepare(error_data, error_ecc,
correct_data, correct_ecc, size);
err = nand_ecc_test[i].verify(error_data, error_ecc,
correct_data, size);
if (err) {
pr_err("not ok - %s-%zd\n",
nand_ecc_test[i].name, size);
dump_data_ecc(error_data, error_ecc,
correct_data, correct_ecc, size);
break;
}
pr_info("ok - %s-%zd\n",
nand_ecc_test[i].name, size);
err = mtdtest_relax();
if (err)
break;
}
error:
kfree(error_data);
kfree(error_ecc);
kfree(correct_data);
kfree(correct_ecc);
return err;
}
#else
static int nand_ecc_test_run(const size_t size)
{
return 0;
}
#endif
static int __init ecc_test_init(void)
{
int err;
err = nand_ecc_test_run(256);
if (err)
return err;
return nand_ecc_test_run(512);
}
static void __exit ecc_test_exit(void)
{
}
module_init(ecc_test_init);
module_exit(ecc_test_exit);
MODULE_DESCRIPTION("NAND ECC function test module");
MODULE_AUTHOR("Akinobu Mita");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/tests/mtd_nandecctest.c |
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "mtd_test: " fmt
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/printk.h>
#include "mtd_test.h"
int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum)
{
int err;
struct erase_info ei;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
memset(&ei, 0, sizeof(struct erase_info));
ei.addr = addr;
ei.len = mtd->erasesize;
err = mtd_erase(mtd, &ei);
if (err) {
pr_info("error %d while erasing EB %d\n", err, ebnum);
return err;
}
return 0;
}
static int is_block_bad(struct mtd_info *mtd, unsigned int ebnum)
{
int ret;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
ret = mtd_block_isbad(mtd, addr);
if (ret)
pr_info("block %d is bad\n", ebnum);
return ret;
}
int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
unsigned int eb, int ebcnt)
{
int i, bad = 0;
if (!mtd_can_have_bb(mtd))
return 0;
pr_info("scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(mtd, eb + i) ? 1 : 0;
if (bbt[i])
bad += 1;
cond_resched();
}
pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
return 0;
}
int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
unsigned int eb, int ebcnt)
{
int err;
unsigned int i;
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = mtdtest_erase_eraseblock(mtd, eb + i);
if (err)
return err;
cond_resched();
}
return 0;
}
int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf)
{
size_t read;
int err;
err = mtd_read(mtd, addr, size, &read, buf);
/* Ignore corrected ECC errors */
if (mtd_is_bitflip(err))
err = 0;
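/*
* mtd_is_bitflip() matches -EUCLEAN, which the MTD layer returns when
* all bit errors were corrected by ECC; the data in 'buf' is still
* valid in that case, so the read is treated as a success.
*/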
if (!err && read != size)
err = -EIO;
if (err)
pr_err("error: read failed at %#llx\n", addr);
return err;
}
int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
const void *buf)
{
size_t written;
int err;
err = mtd_write(mtd, addr, size, &written, buf);
if (!err && written != size)
err = -EIO;
if (err)
pr_err("error: write failed at %#llx\n", addr);
return err;
}
| linux-master | drivers/mtd/tests/mtd_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2008 Nokia Corporation
*
* Check MTD device read.
*
* Author: Adrian Hunter <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "mtd_test.h"
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
static struct mtd_info *mtd;
static unsigned char *iobuf;
static unsigned char *iobuf1;
static unsigned char *bbt;
static int pgsize;
static int ebcnt;
static int pgcnt;
static int read_eraseblock_by_page(int ebnum)
{
int i, ret, err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
void *buf = iobuf;
void *oobbuf = iobuf1;
for (i = 0; i < pgcnt; i++) {
memset(buf, 0, pgsize);
ret = mtdtest_read(mtd, addr, pgsize, buf);
if (ret) {
if (!err)
err = ret;
}
if (mtd->oobsize) {
struct mtd_oob_ops ops = { };
ops.mode = MTD_OPS_PLACE_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->oobsize;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = oobbuf;
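/*
* With datbuf == NULL only the spare area is transferred;
* MTD_OPS_PLACE_OOB reads the OOB bytes as stored, starting
* at ooboffs within the OOB region.
*/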
ret = mtd_read_oob(mtd, addr, &ops);
if ((ret && !mtd_is_bitflip(ret)) ||
ops.oobretlen != mtd->oobsize) {
pr_err("error: read oob failed at "
"%#llx\n", (long long)addr);
if (!err)
err = ret;
if (!err)
err = -EINVAL;
}
oobbuf += mtd->oobsize;
}
addr += pgsize;
buf += pgsize;
}
return err;
}
static void dump_eraseblock(int ebnum)
{
int i, j, n;
char line[128];
int pg, oob;
pr_info("dumping eraseblock %d\n", ebnum);
n = mtd->erasesize;
for (i = 0; i < n;) {
char *p = line;
p += sprintf(p, "%05x: ", i);
for (j = 0; j < 32 && i < n; j++, i++)
p += sprintf(p, "%02x", (unsigned int)iobuf[i]);
printk(KERN_CRIT "%s\n", line);
cond_resched();
}
if (!mtd->oobsize)
return;
pr_info("dumping oob from eraseblock %d\n", ebnum);
n = mtd->oobsize;
for (pg = 0, i = 0; pg < pgcnt; pg++)
for (oob = 0; oob < n;) {
char *p = line;
p += sprintf(p, "%05x: ", i);
for (j = 0; j < 32 && oob < n; j++, oob++, i++)
p += sprintf(p, "%02x",
(unsigned int)iobuf1[i]);
printk(KERN_CRIT "%s\n", line);
cond_resched();
}
}
static int __init mtd_readtest_init(void)
{
uint64_t tmp;
int err, i;
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
pr_info("Please specify a valid mtd-device via module parameter\n");
return -EINVAL;
}
pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
pr_err("error: Cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
pgsize = mtd->writesize;
tmp = mtd->size;
do_div(tmp, mtd->erasesize);
ebcnt = tmp;
pgcnt = mtd->erasesize / pgsize;
pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
pgsize, ebcnt, pgcnt, mtd->oobsize);
err = -ENOMEM;
iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!iobuf)
goto out;
iobuf1 = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!iobuf1)
goto out;
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt)
goto out;
err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
/* Read all eraseblocks 1 page at a time */
pr_info("testing page read\n");
for (i = 0; i < ebcnt; ++i) {
int ret;
if (bbt[i])
continue;
ret = read_eraseblock_by_page(i);
if (ret) {
dump_eraseblock(i);
if (!err)
err = ret;
}
ret = mtdtest_relax();
if (ret) {
err = ret;
goto out;
}
}
if (err)
pr_info("finished with errors\n");
else
pr_info("finished\n");
out:
kfree(iobuf);
kfree(iobuf1);
kfree(bbt);
put_mtd_device(mtd);
if (err)
pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
module_init(mtd_readtest_init);
static void __exit mtd_readtest_exit(void)
{
return;
}
module_exit(mtd_readtest_exit);
MODULE_DESCRIPTION("Read test module");
MODULE_AUTHOR("Adrian Hunter");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/tests/readtest.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2012 NetCommWireless
* Iwo Mergler <[email protected]>
*
* Test for multi-bit error recovery on a NAND page. This mostly tests the
* ECC controller / driver.
*
* There are two test modes:
*
* 0 - artificially inserting bit errors until the ECC fails
* This is the default method and fairly quick. It should
* be independent of the quality of the FLASH.
*
* 1 - re-writing the same pattern repeatedly until the ECC fails.
* This method relies on the physics of NAND FLASH to eventually
* generate '0' bits if '1' has been written sufficient times.
* Depending on the NAND, the first bit errors will appear after
* 1000 or more writes and then will usually snowball, reaching the
* limits of the ECC quickly.
*
* The test stops after 10000 cycles, should your FLASH be
* exceptionally good and not generate bit errors before that. Try
* a different page in that case.
*
* Please note that neither of these tests will significantly 'use up' any
* FLASH endurance. Only a maximum of two erase operations will be performed.
*/
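/*
* A typical invocation (module name assumed from the build system,
* parameters as declared below) might be:
*
*   modprobe mtd_nandbiterrs dev=0 page_offset=0 mode=0
*
* where mode=1 selects the repeated-overwrite method instead.
*/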
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mtd/mtd.h>
#include <linux/err.h>
#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
#include "mtd_test.h"
static int dev;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
static unsigned page_offset;
module_param(page_offset, uint, S_IRUGO);
MODULE_PARM_DESC(page_offset, "Page number relative to dev start");
static unsigned seed;
module_param(seed, uint, S_IRUGO);
MODULE_PARM_DESC(seed, "Random seed");
static int mode;
module_param(mode, int, S_IRUGO);
MODULE_PARM_DESC(mode, "0=incremental errors, 1=overwrite test");
static unsigned max_overwrite = 10000;
static loff_t offset; /* Offset of the page we're using. */
static unsigned eraseblock; /* Eraseblock number for our page. */
/* We assume that the ECC can correct up to a certain number
* of bit errors per subpage. */
static unsigned subsize; /* Size of subpages */
static unsigned subcount; /* Number of subpages per page */
static struct mtd_info *mtd; /* MTD device */
static uint8_t *wbuffer; /* One page write / compare buffer */
static uint8_t *rbuffer; /* One page read buffer */
/* 'random' bytes from known offsets */
static uint8_t hash(unsigned offset)
{
unsigned v = offset;
unsigned char c;
v ^= 0x7f7edfd3;
v = v ^ (v >> 3);
v = v ^ (v >> 5);
v = v ^ (v >> 13);
c = v & 0xFF;
/* Reverse bits of result. */
c = (c & 0x0F) << 4 | (c & 0xF0) >> 4;
c = (c & 0x33) << 2 | (c & 0xCC) >> 2;
c = (c & 0x55) << 1 | (c & 0xAA) >> 1;
return c;
}
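/* Because hash() is a pure function of its offset, verify_page() can
* recompute the expected byte for any position from (i + seed) alone,
* without keeping a reference copy of the page in memory. */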
/* Writes wbuffer to page */
static int write_page(int log)
{
if (log)
pr_info("write_page\n");
return mtdtest_write(mtd, offset, mtd->writesize, wbuffer);
}
/* Re-writes the data area while leaving the OOB alone. */
static int rewrite_page(int log)
{
int err = 0;
struct mtd_oob_ops ops = { };
if (log)
pr_info("rewrite page\n");
ops.mode = MTD_OPS_RAW; /* No ECC */
ops.len = mtd->writesize;
ops.retlen = 0;
ops.ooblen = 0;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = wbuffer;
ops.oobbuf = NULL;
err = mtd_write_oob(mtd, offset, &ops);
if (err || ops.retlen != mtd->writesize) {
pr_err("error: write_oob failed (%d)\n", err);
if (!err)
err = -EIO;
}
return err;
}
/* Reads page into rbuffer. Returns number of corrected bit errors (>=0)
* or error (<0) */
static int read_page(int log)
{
int err = 0;
size_t read;
struct mtd_ecc_stats oldstats;
if (log)
pr_info("read_page\n");
/* Saving last mtd stats */
memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats));
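/*
* ecc_stats.corrected is a cumulative, device-wide counter, so the
* difference across this single read gives the number of bit errors
* corrected by this read alone.
*/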
err = mtd_read(mtd, offset, mtd->writesize, &read, rbuffer);
if (!err || err == -EUCLEAN)
err = mtd->ecc_stats.corrected - oldstats.corrected;
if (err < 0 || read != mtd->writesize) {
pr_err("error: read failed at %#llx\n", (long long)offset);
if (err >= 0)
err = -EIO;
}
return err;
}
/* Verifies rbuffer against random sequence */
static int verify_page(int log)
{
unsigned i, errs = 0;
if (log)
pr_info("verify_page\n");
for (i = 0; i < mtd->writesize; i++) {
if (rbuffer[i] != hash(i+seed)) {
pr_err("Error: page offset %u, expected %02x, got %02x\n",
i, hash(i+seed), rbuffer[i]);
errs++;
}
}
if (errs)
return -EIO;
else
return 0;
}
#define CBIT(v, n) ((v) & (1 << (n)))
#define BCLR(v, n) ((v) = (v) & ~(1 << (n)))
/* Finds the first '1' bit in wbuffer starting at offset 'byte'
* and sets it to '0'. */
static int insert_biterror(unsigned byte)
{
int bit;
while (byte < mtd->writesize) {
for (bit = 7; bit >= 0; bit--) {
if (CBIT(wbuffer[byte], bit)) {
BCLR(wbuffer[byte], bit);
pr_info("Inserted biterror @ %u/%u\n", byte, bit);
return 0;
}
}
byte++;
}
pr_err("biterror: Failed to find a '1' bit\n");
return -EIO;
}
/* Writes 'random' data to page and then introduces deliberate bit
* errors into the page, while verifying each step. */
static int incremental_errors_test(void)
{
int err = 0;
unsigned i;
unsigned errs_per_subpage = 0;
pr_info("incremental biterrors test\n");
for (i = 0; i < mtd->writesize; i++)
wbuffer[i] = hash(i+seed);
err = write_page(1);
if (err)
goto exit;
while (1) {
err = rewrite_page(1);
if (err)
goto exit;
err = read_page(1);
if (err > 0)
pr_info("Read reported %d corrected bit errors\n", err);
if (err < 0) {
pr_err("After %d biterrors per subpage, read reported error %d\n",
errs_per_subpage, err);
err = 0;
goto exit;
}
err = verify_page(1);
if (err) {
pr_err("ECC failure, read data is incorrect despite read success\n");
goto exit;
}
pr_info("Successfully corrected %d bit errors per subpage\n",
errs_per_subpage);
for (i = 0; i < subcount; i++) {
err = insert_biterror(i * subsize);
if (err < 0)
goto exit;
}
errs_per_subpage++;
}
exit:
return err;
}
/* Writes 'random' data to page and then re-writes that same data repeatedly.
This eventually develops bit errors (bits written as '1' will slowly become
'0'), which are corrected as far as the ECC is capable of. */
static int overwrite_test(void)
{
int err = 0;
unsigned i;
unsigned max_corrected = 0;
unsigned opno = 0;
/* We don't expect more than this many correctable bit errors per
* page. */
#define MAXBITS 512
static unsigned bitstats[MAXBITS]; /* bit error histogram. */
memset(bitstats, 0, sizeof(bitstats));
pr_info("overwrite biterrors test\n");
for (i = 0; i < mtd->writesize; i++)
wbuffer[i] = hash(i+seed);
err = write_page(1);
if (err)
goto exit;
while (opno < max_overwrite) {
err = write_page(0);
if (err)
break;
err = read_page(0);
if (err >= 0) {
if (err >= MAXBITS) {
pr_info("Implausible number of bit errors corrected\n");
err = -EIO;
break;
}
bitstats[err]++;
if (err > max_corrected) {
max_corrected = err;
pr_info("Read reported %d corrected bit errors\n",
err);
}
} else { /* err < 0 */
pr_info("Read reported error %d\n", err);
err = 0;
break;
}
err = verify_page(0);
if (err) {
bitstats[max_corrected] = opno;
pr_info("ECC failure, read data is incorrect despite read success\n");
break;
}
err = mtdtest_relax();
if (err)
break;
opno++;
}
/* At this point bitstats[0] contains the number of ops with no bit
* errors, bitstats[1] the number of ops with 1 bit error, etc. */
pr_info("Bit error histogram (%d operations total):\n", opno);
for (i = 0; i < max_corrected; i++)
pr_info("Page reads with %3d corrected bit errors: %d\n",
i, bitstats[i]);
exit:
return err;
}
static int __init mtd_nandbiterrs_init(void)
{
int err = 0;
printk("\n");
printk(KERN_INFO "==================================================\n");
pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
pr_err("error: cannot get MTD device\n");
goto exit_mtddev;
}
if (!mtd_type_is_nand(mtd)) {
pr_info("this test requires NAND flash\n");
err = -ENODEV;
goto exit_nand;
}
pr_info("MTD device size %llu, eraseblock=%u, page=%u, oob=%u\n",
(unsigned long long)mtd->size, mtd->erasesize,
mtd->writesize, mtd->oobsize);
subsize = mtd->writesize >> mtd->subpage_sft;
subcount = mtd->writesize / subsize;
pr_info("Device uses %d subpages of %d bytes\n", subcount, subsize);
offset = (loff_t)page_offset * mtd->writesize;
eraseblock = mtd_div_by_eb(offset, mtd);
pr_info("Using page=%u, offset=%llu, eraseblock=%u\n",
page_offset, offset, eraseblock);
wbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
if (!wbuffer) {
err = -ENOMEM;
goto exit_wbuffer;
}
rbuffer = kmalloc(mtd->writesize, GFP_KERNEL);
if (!rbuffer) {
err = -ENOMEM;
goto exit_rbuffer;
}
err = mtdtest_erase_eraseblock(mtd, eraseblock);
if (err)
goto exit_error;
if (mode == 0)
err = incremental_errors_test();
else
err = overwrite_test();
if (err)
goto exit_error;
/* We leave the block un-erased in case of test failure. */
err = mtdtest_erase_eraseblock(mtd, eraseblock);
if (err)
goto exit_error;
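/*
* Returning an error from init is presumably deliberate: it keeps the
* module from staying loaded once this one-shot test has completed.
*/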
err = -EIO;
pr_info("finished successfully.\n");
printk(KERN_INFO "==================================================\n");
exit_error:
kfree(rbuffer);
exit_rbuffer:
kfree(wbuffer);
exit_wbuffer:
/* Nothing */
exit_nand:
put_mtd_device(mtd);
exit_mtddev:
return err;
}
static void __exit mtd_nandbiterrs_exit(void)
{
return;
}
module_init(mtd_nandbiterrs_init);
module_exit(mtd_nandbiterrs_exit);
MODULE_DESCRIPTION("NAND bit error recovery test");
MODULE_AUTHOR("Iwo Mergler");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/tests/nandbiterrs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2008 Nokia Corporation
*
* Test page read and write on MTD device.
*
* Author: Adrian Hunter <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/div64.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/random.h>
#include "mtd_test.h"
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
static struct mtd_info *mtd;
static unsigned char *twopages;
static unsigned char *writebuf;
static unsigned char *boundary;
static unsigned char *bbt;
static int pgsize;
static int bufsize;
static int ebcnt;
static int pgcnt;
static int errcnt;
static struct rnd_state rnd_state;
static int write_eraseblock(int ebnum)
{
loff_t addr = (loff_t)ebnum * mtd->erasesize;
prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
cond_resched();
return mtdtest_write(mtd, addr, mtd->erasesize, writebuf);
}
static int verify_eraseblock(int ebnum)
{
uint32_t j;
int err = 0, i;
loff_t addr0, addrn;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
addr0 = 0;
for (i = 0; i < ebcnt && bbt[i]; ++i)
addr0 += mtd->erasesize;
addrn = mtd->size;
for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
addrn -= mtd->erasesize;
prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
/* Do a read to set the internal dataRAMs to different data */
err = mtdtest_read(mtd, addr0, bufsize, twopages);
if (err)
return err;
err = mtdtest_read(mtd, addrn - bufsize, bufsize, twopages);
if (err)
return err;
memset(twopages, 0, bufsize);
err = mtdtest_read(mtd, addr, bufsize, twopages);
if (err)
break;
if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
}
}
/* Check boundary between eraseblocks */
if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {
struct rnd_state old_state = rnd_state;
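/*
* Peek ahead in the PRNG stream to build the expected image of the
* next block's first page; old_state is restored below so the
* per-block verification sequence is not disturbed.
*/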
/* Do a read to set the internal dataRAMs to different data */
err = mtdtest_read(mtd, addr0, bufsize, twopages);
if (err)
return err;
err = mtdtest_read(mtd, addrn - bufsize, bufsize, twopages);
if (err)
return err;
memset(twopages, 0, bufsize);
err = mtdtest_read(mtd, addr, bufsize, twopages);
if (err)
return err;
memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
prandom_bytes_state(&rnd_state, boundary + pgsize, pgsize);
if (memcmp(twopages, boundary, bufsize)) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
}
rnd_state = old_state;
}
return err;
}
static int crosstest(void)
{
int err = 0, i;
loff_t addr, addr0, addrn;
unsigned char *pp1, *pp2, *pp3, *pp4;
pr_info("crosstest\n");
pp1 = kcalloc(pgsize, 4, GFP_KERNEL);
if (!pp1)
return -ENOMEM;
pp2 = pp1 + pgsize;
pp3 = pp2 + pgsize;
pp4 = pp3 + pgsize;
addr0 = 0;
for (i = 0; i < ebcnt && bbt[i]; ++i)
addr0 += mtd->erasesize;
addrn = mtd->size;
for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
addrn -= mtd->erasesize;
/* Read 2nd-to-last page to pp1 */
addr = addrn - pgsize - pgsize;
err = mtdtest_read(mtd, addr, pgsize, pp1);
if (err) {
kfree(pp1);
return err;
}
/* Read 3rd-to-last page to pp1 */
addr = addrn - pgsize - pgsize - pgsize;
err = mtdtest_read(mtd, addr, pgsize, pp1);
if (err) {
kfree(pp1);
return err;
}
/* Read first page to pp2 */
addr = addr0;
pr_info("reading page at %#llx\n", (long long)addr);
err = mtdtest_read(mtd, addr, pgsize, pp2);
if (err) {
kfree(pp1);
return err;
}
/* Read last page to pp3 */
addr = addrn - pgsize;
pr_info("reading page at %#llx\n", (long long)addr);
err = mtdtest_read(mtd, addr, pgsize, pp3);
if (err) {
kfree(pp1);
return err;
}
/* Read first page again to pp4 */
addr = addr0;
pr_info("reading page at %#llx\n", (long long)addr);
err = mtdtest_read(mtd, addr, pgsize, pp4);
if (err) {
kfree(pp1);
return err;
}
/* pp2 and pp4 should be the same */
pr_info("verifying pages read at %#llx match\n",
(long long)addr0);
if (memcmp(pp2, pp4, pgsize)) {
pr_err("verify failed!\n");
errcnt += 1;
} else if (!err)
pr_info("crosstest ok\n");
kfree(pp1);
return err;
}
static int erasecrosstest(void)
{
int err = 0, i, ebnum, ebnum2;
loff_t addr0;
char *readbuf = twopages;
pr_info("erasecrosstest\n");
ebnum = 0;
addr0 = 0;
for (i = 0; i < ebcnt && bbt[i]; ++i) {
addr0 += mtd->erasesize;
ebnum += 1;
}
ebnum2 = ebcnt - 1;
while (ebnum2 && bbt[ebnum2])
ebnum2 -= 1;
pr_info("erasing block %d\n", ebnum);
err = mtdtest_erase_eraseblock(mtd, ebnum);
if (err)
return err;
pr_info("writing 1st page of block %d\n", ebnum);
prandom_bytes_state(&rnd_state, writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
err = mtdtest_write(mtd, addr0, pgsize, writebuf);
if (err)
return err;
pr_info("reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
err = mtdtest_read(mtd, addr0, pgsize, readbuf);
if (err)
return err;
pr_info("verifying 1st page of block %d\n", ebnum);
if (memcmp(writebuf, readbuf, pgsize)) {
pr_err("verify failed!\n");
errcnt += 1;
return -1;
}
pr_info("erasing block %d\n", ebnum);
err = mtdtest_erase_eraseblock(mtd, ebnum);
if (err)
return err;
pr_info("writing 1st page of block %d\n", ebnum);
prandom_bytes_state(&rnd_state, writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
err = mtdtest_write(mtd, addr0, pgsize, writebuf);
if (err)
return err;
pr_info("erasing block %d\n", ebnum2);
err = mtdtest_erase_eraseblock(mtd, ebnum2);
if (err)
return err;
pr_info("reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
err = mtdtest_read(mtd, addr0, pgsize, readbuf);
if (err)
return err;
pr_info("verifying 1st page of block %d\n", ebnum);
if (memcmp(writebuf, readbuf, pgsize)) {
pr_err("verify failed!\n");
errcnt += 1;
return -1;
}
if (!err)
pr_info("erasecrosstest ok\n");
return err;
}
static int erasetest(void)
{
int err = 0, i, ebnum, ok = 1;
loff_t addr0;
pr_info("erasetest\n");
ebnum = 0;
addr0 = 0;
for (i = 0; i < ebcnt && bbt[i]; ++i) {
addr0 += mtd->erasesize;
ebnum += 1;
}
pr_info("erasing block %d\n", ebnum);
err = mtdtest_erase_eraseblock(mtd, ebnum);
if (err)
return err;
pr_info("writing 1st page of block %d\n", ebnum);
prandom_bytes_state(&rnd_state, writebuf, pgsize);
err = mtdtest_write(mtd, addr0, pgsize, writebuf);
if (err)
return err;
pr_info("erasing block %d\n", ebnum);
err = mtdtest_erase_eraseblock(mtd, ebnum);
if (err)
return err;
pr_info("reading 1st page of block %d\n", ebnum);
err = mtdtest_read(mtd, addr0, pgsize, twopages);
if (err)
return err;
pr_info("verifying 1st page of block %d is all 0xff\n",
ebnum);
for (i = 0; i < pgsize; ++i)
if (twopages[i] != 0xff) {
pr_err("verifying all 0xff failed at %d\n",
i);
errcnt += 1;
ok = 0;
break;
}
if (ok && !err)
pr_info("erasetest ok\n");
return err;
}
static int __init mtd_pagetest_init(void)
{
int err = 0;
uint64_t tmp;
uint32_t i;
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
pr_info("Please specify a valid mtd-device via module parameter\n");
pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
pr_err("error: cannot get MTD device\n");
return err;
}
if (!mtd_type_is_nand(mtd)) {
pr_info("this test requires NAND flash\n");
goto out;
}
tmp = mtd->size;
do_div(tmp, mtd->erasesize);
ebcnt = tmp;
pgcnt = mtd->erasesize / mtd->writesize;
pgsize = mtd->writesize;
pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
pgsize, ebcnt, pgcnt, mtd->oobsize);
err = -ENOMEM;
bufsize = pgsize * 2;
writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!writebuf)
goto out;
twopages = kmalloc(bufsize, GFP_KERNEL);
if (!twopages)
goto out;
boundary = kmalloc(bufsize, GFP_KERNEL);
if (!boundary)
goto out;
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt)
goto out;
err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
/* Erase all eraseblocks */
pr_info("erasing whole device\n");
err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
pr_info("erased %u eraseblocks\n", ebcnt);
/* Write all eraseblocks */
prandom_seed_state(&rnd_state, 1);
pr_info("writing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = write_eraseblock(i);
if (err)
goto out;
if (i % 256 == 0)
pr_info("written up to eraseblock %u\n", i);
err = mtdtest_relax();
if (err)
goto out;
}
pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
prandom_seed_state(&rnd_state, 1);
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = verify_eraseblock(i);
if (err)
goto out;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
err = mtdtest_relax();
if (err)
goto out;
}
pr_info("verified %u eraseblocks\n", i);
err = crosstest();
if (err)
goto out;
if (ebcnt > 1) {
err = erasecrosstest();
if (err)
goto out;
} else {
pr_info("skipping erasecrosstest, 2 erase blocks needed\n");
}
err = erasetest();
if (err)
goto out;
pr_info("finished with %d errors\n", errcnt);
out:
kfree(bbt);
kfree(boundary);
kfree(twopages);
kfree(writebuf);
put_mtd_device(mtd);
if (err)
pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
module_init(mtd_pagetest_init);
static void __exit mtd_pagetest_exit(void)
{
return;
}
module_exit(mtd_pagetest_exit);
MODULE_DESCRIPTION("NAND page test");
MODULE_AUTHOR("Adrian Hunter");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/tests/pagetest.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2008 Artem Bityutskiy
* Copyright (C) 2006-2008 Jarkko Lavinen
* Copyright (C) 2006-2008 Adrian Hunter
*
* Authors: Artem Bityutskiy, Jarkko Lavinen, Adrian Hunter
*
* WARNING: this test program may kill your flash and your device. Do not
* use it unless you know what you do. Authors are not responsible for any
* damage caused by this program.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "mtd_test.h"
#define RETRIES 3
static int eb = 8;
module_param(eb, int, S_IRUGO);
MODULE_PARM_DESC(eb, "eraseblock number within the selected MTD device");
static int ebcnt = 32;
module_param(ebcnt, int, S_IRUGO);
MODULE_PARM_DESC(ebcnt, "number of consecutive eraseblocks to torture");
static int pgcnt;
module_param(pgcnt, int, S_IRUGO);
MODULE_PARM_DESC(pgcnt, "number of pages per eraseblock to torture (0 => all)");
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
static int gran = 512;
module_param(gran, int, S_IRUGO);
MODULE_PARM_DESC(gran, "how often the status information should be printed");
static int check = 1;
module_param(check, int, S_IRUGO);
MODULE_PARM_DESC(check, "if the written data should be checked");
static unsigned int cycles_count;
module_param(cycles_count, uint, S_IRUGO);
MODULE_PARM_DESC(cycles_count, "how many erase cycles to do "
"(infinite by default)");
static struct mtd_info *mtd;
/* This buffer contains 0x555555...0xAAAAAA... pattern */
static unsigned char *patt_5A5;
/* This buffer contains 0xAAAAAA...0x555555... pattern */
static unsigned char *patt_A5A;
/* This buffer contains all 0xFF bytes */
static unsigned char *patt_FF;
/* This temporary buffer is used when checking data */
static unsigned char *check_buf;
/* How many erase cycles were done */
static unsigned int erase_cycles;
static int pgsize;
static ktime_t start, finish;
static void report_corrupt(unsigned char *read, unsigned char *written);
static inline void start_timing(void)
{
start = ktime_get();
}
static inline void stop_timing(void)
{
finish = ktime_get();
}
/*
* Check that the contents of eraseblock number @ebnum are equivalent to the
* @buf buffer.
*/
static inline int check_eraseblock(int ebnum, unsigned char *buf)
{
int err, retries = 0;
size_t read;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
size_t len = mtd->erasesize;
if (pgcnt) {
addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
len = pgcnt * pgsize;
}
retry:
err = mtd_read(mtd, addr, len, &read, check_buf);
if (mtd_is_bitflip(err))
pr_err("single bit flip occurred at EB %d "
"MTD reported that it was fixed.\n", ebnum);
else if (err) {
pr_err("error %d while reading EB %d, "
"read %zd\n", err, ebnum, read);
return err;
}
if (read != len) {
pr_err("failed to read %zd bytes from EB %d, "
"read only %zd, but no error reported\n",
len, ebnum, read);
return -EIO;
}
if (memcmp(buf, check_buf, len)) {
pr_err("read wrong data from EB %d\n", ebnum);
report_corrupt(check_buf, buf);
if (retries++ < RETRIES) {
/* Try read again */
yield();
pr_info("re-try reading data from EB %d\n",
ebnum);
goto retry;
} else {
pr_info("retried %d times, still errors, "
"give-up\n", RETRIES);
return -EINVAL;
}
}
if (retries != 0)
pr_info("only attempt number %d was OK (!!!)\n",
retries);
return 0;
}
static inline int write_pattern(int ebnum, void *buf)
{
int err;
size_t written;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
size_t len = mtd->erasesize;
if (pgcnt) {
addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
len = pgcnt * pgsize;
}
err = mtd_write(mtd, addr, len, &written, buf);
if (err) {
pr_err("error %d while writing EB %d, written %zd"
" bytes\n", err, ebnum, written);
return err;
}
if (written != len) {
pr_info("written only %zd bytes of %zd, but no error"
" reported\n", written, len);
return -EIO;
}
return 0;
}
static int __init tort_init(void)
{
int err = 0, i, infinite = !cycles_count;
unsigned char *bad_ebs;
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
pr_info("Warning: this program is trying to wear out your "
"flash, stop it if this is not wanted.\n");
if (dev < 0) {
pr_info("Please specify a valid mtd-device via module parameter\n");
pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
pr_info("MTD device: %d\n", dev);
pr_info("torture %d eraseblocks (%d-%d) of mtd%d\n",
ebcnt, eb, eb + ebcnt - 1, dev);
if (pgcnt)
pr_info("torturing just %d pages per eraseblock\n",
pgcnt);
pr_info("write verify %s\n", check ? "enabled" : "disabled");
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
pr_info("not NAND flash, assume page size is 512 "
"bytes.\n");
pgsize = 512;
} else
pgsize = mtd->writesize;
if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) {
pr_err("error: invalid pgcnt value %d\n", pgcnt);
err = -EINVAL;
goto out_mtd;
}
err = -ENOMEM;
patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!patt_5A5)
goto out_mtd;
patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!patt_A5A)
goto out_patt_5A5;
patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!patt_FF)
goto out_patt_A5A;
check_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!check_buf)
goto out_patt_FF;
bad_ebs = kzalloc(ebcnt, GFP_KERNEL);
if (!bad_ebs)
goto out_check_buf;
/* Initialize patterns */
memset(patt_FF, 0xFF, mtd->erasesize);
for (i = 0; i < mtd->erasesize / pgsize; i++) {
if (!(i & 1)) {
memset(patt_5A5 + i * pgsize, 0x55, pgsize);
memset(patt_A5A + i * pgsize, 0xAA, pgsize);
} else {
memset(patt_5A5 + i * pgsize, 0xAA, pgsize);
memset(patt_A5A + i * pgsize, 0x55, pgsize);
}
}
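/*
* The two buffers now hold page-alternating inverse patterns (0x55
* pages interleaved with 0xAA pages), so consecutive torture cycles
* flip every written data bit.
*/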
err = mtdtest_scan_for_bad_eraseblocks(mtd, bad_ebs, eb, ebcnt);
if (err)
goto out;
start_timing();
while (1) {
int i;
void *patt;
err = mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
if (err)
goto out;
/* Check if the eraseblocks contain only 0xFF bytes */
if (check) {
for (i = eb; i < eb + ebcnt; i++) {
if (bad_ebs[i - eb])
continue;
err = check_eraseblock(i, patt_FF);
if (err) {
pr_info("verify failed"
" for 0xFF... pattern\n");
goto out;
}
err = mtdtest_relax();
if (err)
goto out;
}
}
/* Write the pattern */
for (i = eb; i < eb + ebcnt; i++) {
if (bad_ebs[i - eb])
continue;
if ((eb + erase_cycles) & 1)
patt = patt_5A5;
else
patt = patt_A5A;
err = write_pattern(i, patt);
if (err)
goto out;
err = mtdtest_relax();
if (err)
goto out;
}
/* Verify what we wrote */
if (check) {
for (i = eb; i < eb + ebcnt; i++) {
if (bad_ebs[i - eb])
continue;
if ((eb + erase_cycles) & 1)
patt = patt_5A5;
else
patt = patt_A5A;
err = check_eraseblock(i, patt);
if (err) {
pr_info("verify failed for %s"
" pattern\n",
((eb + erase_cycles) & 1) ?
"0x55AA55..." : "0xAA55AA...");
goto out;
}
err = mtdtest_relax();
if (err)
goto out;
}
}
erase_cycles += 1;
if (erase_cycles % gran == 0) {
long ms;
stop_timing();
ms = ktime_ms_delta(finish, start);
pr_info("%08u erase cycles done, took %lu "
"milliseconds (%lu seconds)\n",
erase_cycles, ms, ms / 1000);
start_timing();
}
if (!infinite && --cycles_count == 0)
break;
}
out:
pr_info("finished after %u erase cycles\n",
erase_cycles);
kfree(bad_ebs);
out_check_buf:
kfree(check_buf);
out_patt_FF:
kfree(patt_FF);
out_patt_A5A:
kfree(patt_A5A);
out_patt_5A5:
kfree(patt_5A5);
out_mtd:
put_mtd_device(mtd);
if (err)
pr_info("error %d occurred during torturing\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
module_init(tort_init);
static void __exit tort_exit(void)
{
return;
}
module_exit(tort_exit);
static int countdiffs(unsigned char *buf, unsigned char *check_buf,
unsigned offset, unsigned len, unsigned *bytesp,
unsigned *bitsp);
static void print_bufs(unsigned char *read, unsigned char *written, int start,
int len);
/*
* Report the detailed information about how the read EB differs from what was
* written.
*/
static void report_corrupt(unsigned char *read, unsigned char *written)
{
int i;
int bytes, bits, pages, first;
int offset, len;
size_t check_len = mtd->erasesize;
if (pgcnt)
check_len = pgcnt * pgsize;
bytes = bits = pages = 0;
for (i = 0; i < check_len; i += pgsize)
if (countdiffs(written, read, i, pgsize, &bytes,
&bits) >= 0)
pages++;
pr_info("verify fails on %d pages, %d bytes/%d bits\n",
pages, bytes, bits);
pr_info("The following is a list of all differences between"
" what was read from flash and what was expected\n");
for (i = 0; i < check_len; i += pgsize) {
cond_resched();
bytes = bits = 0;
first = countdiffs(written, read, i, pgsize, &bytes,
&bits);
if (first < 0)
continue;
printk("-------------------------------------------------------"
"----------------------------------\n");
pr_info("Page %zd has %d bytes/%d bits failing verify,"
" starting at offset 0x%x\n",
(mtd->erasesize - check_len + i) / pgsize,
bytes, bits, first);
offset = first & ~0x7;
len = ((first + bytes) | 0x7) + 1 - offset;
print_bufs(read, written, offset, len);
}
}
static void print_bufs(unsigned char *read, unsigned char *written, int start,
int len)
{
int i = 0, j1, j2;
char *diff;
printk("Offset Read Written\n");
while (i < len) {
printk("0x%08x: ", start + i);
diff = " ";
for (j1 = 0; j1 < 8 && i + j1 < len; j1++) {
printk(" %02x", read[start + i + j1]);
if (read[start + i + j1] != written[start + i + j1])
diff = "***";
}
while (j1 < 8) {
printk(" ");
j1 += 1;
}
printk(" %s ", diff);
for (j2 = 0; j2 < 8 && i + j2 < len; j2++)
printk(" %02x", written[start + i + j2]);
printk("\n");
i += 8;
}
}
/*
* Count the number of differing bytes and bits and return the first differing
* offset.
*/
static int countdiffs(unsigned char *buf, unsigned char *check_buf,
unsigned offset, unsigned len, unsigned *bytesp,
unsigned *bitsp)
{
unsigned i, bit;
int first = -1;
for (i = offset; i < offset + len; i++)
if (buf[i] != check_buf[i]) {
first = i;
break;
}
while (i < offset + len) {
if (buf[i] != check_buf[i]) {
(*bytesp)++;
bit = 1;
while (bit < 256) {
if ((buf[i] & bit) != (check_buf[i] & bit))
(*bitsp)++;
bit <<= 1;
}
}
i++;
}
return first;
}
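/*
 * Illustrative sketch (not part of the original module): the manual bit
 * walk in countdiffs() is equivalent to XOR-ing the two bytes and using
 * the kernel's hweight8() population count from <linux/bitops.h>.
 * E.g. 0x55 vs 0x5F gives 0x55 ^ 0x5F = 0x0A, i.e. 2 differing bits.
 */
static inline unsigned int __maybe_unused bit_diffs_hweight(unsigned char a,
unsigned char b)
{
/* each set bit of a ^ b marks one differing bit position */
return hweight8(a ^ b);
}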
MODULE_DESCRIPTION("Eraseblock torturing module");
MODULE_AUTHOR("Artem Bityutskiy, Jarkko Lavinen, Adrian Hunter");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/tests/torturetest.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007 Nokia Corporation
*
* Test read and write speed of a MTD device.
*
* Author: Adrian Hunter <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/random.h>
#include "mtd_test.h"
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
static int count;
module_param(count, int, S_IRUGO);
MODULE_PARM_DESC(count, "Maximum number of eraseblocks to use "
"(0 means use all)");
static struct mtd_info *mtd;
static unsigned char *iobuf;
static unsigned char *bbt;
static int pgsize;
static int ebcnt;
static int pgcnt;
static int goodebcnt;
static ktime_t start, finish;
static int multiblock_erase(int ebnum, int blocks)
{
int err;
struct erase_info ei;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
memset(&ei, 0, sizeof(struct erase_info));
ei.addr = addr;
ei.len = mtd->erasesize * blocks;
err = mtd_erase(mtd, &ei);
if (err) {
pr_err("error %d while erasing EB %d, blocks %d\n",
err, ebnum, blocks);
return err;
}
return 0;
}
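/*
 * Illustrative sketch (not part of this file): the single-block helper
 * mtdtest_erase_eraseblock() used throughout these tests lives in
 * mtd_test.c and is essentially the blocks == 1 case of the function
 * above, i.e. something like:
 */
static int __maybe_unused erase_one_eraseblock_sketch(struct mtd_info *m,
unsigned int ebnum)
{
struct erase_info ei;
int err;
memset(&ei, 0, sizeof(ei));
ei.addr = (loff_t)ebnum * m->erasesize; /* byte offset of the block */
ei.len = m->erasesize; /* exactly one eraseblock */
err = mtd_erase(m, &ei);
if (err)
pr_err("error %d while erasing EB %u\n", err, ebnum);
return err;
}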
static int write_eraseblock(int ebnum)
{
loff_t addr = (loff_t)ebnum * mtd->erasesize;
return mtdtest_write(mtd, addr, mtd->erasesize, iobuf);
}
static int write_eraseblock_by_page(int ebnum)
{
int i, err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < pgcnt; i++) {
err = mtdtest_write(mtd, addr, pgsize, buf);
if (err)
break;
addr += pgsize;
buf += pgsize;
}
return err;
}
static int write_eraseblock_by_2pages(int ebnum)
{
size_t sz = pgsize * 2;
int i, n = pgcnt / 2, err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < n; i++) {
err = mtdtest_write(mtd, addr, sz, buf);
if (err)
return err;
addr += sz;
buf += sz;
}
if (pgcnt % 2)
err = mtdtest_write(mtd, addr, pgsize, buf);
return err;
}
static int read_eraseblock(int ebnum)
{
loff_t addr = (loff_t)ebnum * mtd->erasesize;
return mtdtest_read(mtd, addr, mtd->erasesize, iobuf);
}
static int read_eraseblock_by_page(int ebnum)
{
int i, err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < pgcnt; i++) {
err = mtdtest_read(mtd, addr, pgsize, buf);
if (err)
break;
addr += pgsize;
buf += pgsize;
}
return err;
}
static int read_eraseblock_by_2pages(int ebnum)
{
size_t sz = pgsize * 2;
int i, n = pgcnt / 2, err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < n; i++) {
err = mtdtest_read(mtd, addr, sz, buf);
if (err)
return err;
addr += sz;
buf += sz;
}
if (pgcnt % 2)
err = mtdtest_read(mtd, addr, pgsize, buf);
return err;
}
static inline void start_timing(void)
{
start = ktime_get();
}
static inline void stop_timing(void)
{
finish = ktime_get();
}
static long calc_speed(void)
{
uint64_t k, us;
us = ktime_us_delta(finish, start);
if (us == 0)
return 0;
k = (uint64_t)goodebcnt * (mtd->erasesize / 1024) * 1000000;
do_div(k, us);
return k;
}
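/*
 * Illustrative sketch (not part of the original module): calc_speed()
 * converts "goodebcnt eraseblocks moved in us microseconds" to KiB/s as
 * goodebcnt * (erasesize / 1024) * 1000000 / us. For example, 1024 good
 * 128 KiB eraseblocks in 2 seconds give 1024 * 128 * 1000000 / 2000000
 * = 65536 KiB/s (64 MiB/s). do_div() is used because plain 64-bit
 * division is not available on all 32-bit architectures.
 */
static long __maybe_unused calc_speed_sketch(uint64_t kib, uint64_t us)
{
uint64_t k = kib * 1000000;
if (!us)
return 0;
do_div(k, us); /* do_div() divides k in place and returns the remainder */
return k;
}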
static int __init mtd_speedtest_init(void)
{
int err, i, blocks, j, k;
long speed;
uint64_t tmp;
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
pr_info("Please specify a valid mtd-device via module parameter\n");
pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
if (count)
pr_info("MTD device: %d count: %d\n", dev, count);
else
pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
pr_info("not NAND flash, assume page size is 512 bytes.\n");
pgsize = 512;
} else
pgsize = mtd->writesize;
tmp = mtd->size;
do_div(tmp, mtd->erasesize);
ebcnt = tmp;
pgcnt = mtd->erasesize / pgsize;
pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
pgsize, ebcnt, pgcnt, mtd->oobsize);
if (count > 0 && count < ebcnt)
ebcnt = count;
err = -ENOMEM;
iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!iobuf)
goto out;
get_random_bytes(iobuf, mtd->erasesize);
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt)
goto out;
err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
for (i = 0; i < ebcnt; i++) {
if (!bbt[i])
goodebcnt++;
}
err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
/* Write all eraseblocks, 1 eraseblock at a time */
pr_info("testing eraseblock write speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = write_eraseblock(i);
if (err)
goto out;
err = mtdtest_relax();
if (err)
goto out;
}
stop_timing();
speed = calc_speed();
pr_info("eraseblock write speed is %ld KiB/s\n", speed);
/* Read all eraseblocks, 1 eraseblock at a time */
pr_info("testing eraseblock read speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = read_eraseblock(i);
if (err)
goto out;
err = mtdtest_relax();
if (err)
goto out;
}
stop_timing();
speed = calc_speed();
pr_info("eraseblock read speed is %ld KiB/s\n", speed);
err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
/* Write all eraseblocks, 1 page at a time */
pr_info("testing page write speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = write_eraseblock_by_page(i);
if (err)
goto out;
err = mtdtest_relax();
if (err)
goto out;
}
stop_timing();
speed = calc_speed();
pr_info("page write speed is %ld KiB/s\n", speed);
/* Read all eraseblocks, 1 page at a time */
pr_info("testing page read speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = read_eraseblock_by_page(i);
if (err)
goto out;
err = mtdtest_relax();
if (err)
goto out;
}
stop_timing();
speed = calc_speed();
pr_info("page read speed is %ld KiB/s\n", speed);
err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
/* Write all eraseblocks, 2 pages at a time */
pr_info("testing 2 page write speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = write_eraseblock_by_2pages(i);
if (err)
goto out;
err = mtdtest_relax();
if (err)
goto out;
}
stop_timing();
speed = calc_speed();
pr_info("2 page write speed is %ld KiB/s\n", speed);
/* Read all eraseblocks, 2 pages at a time */
pr_info("testing 2 page read speed\n");
start_timing();
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = read_eraseblock_by_2pages(i);
if (err)
goto out;
err = mtdtest_relax();
if (err)
goto out;
}
stop_timing();
speed = calc_speed();
pr_info("2 page read speed is %ld KiB/s\n", speed);
/* Erase all eraseblocks */
pr_info("Testing erase speed\n");
start_timing();
err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
stop_timing();
speed = calc_speed();
pr_info("erase speed is %ld KiB/s\n", speed);
/* Multi-block erase all eraseblocks */
for (k = 1; k < 7; k++) {
blocks = 1 << k;
pr_info("Testing %dx multi-block erase speed\n",
blocks);
start_timing();
for (i = 0; i < ebcnt; ) {
for (j = 0; j < blocks && (i + j) < ebcnt; j++)
if (bbt[i + j])
break;
if (j < 1) {
i++;
continue;
}
err = multiblock_erase(i, j);
if (err)
goto out;
err = mtdtest_relax();
if (err)
goto out;
i += j;
}
stop_timing();
speed = calc_speed();
pr_info("%dx multi-block erase speed is %ld KiB/s\n",
blocks, speed);
}
pr_info("finished\n");
out:
kfree(iobuf);
kfree(bbt);
put_mtd_device(mtd);
if (err)
pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
module_init(mtd_speedtest_init);
static void __exit mtd_speedtest_exit(void)
{
return;
}
module_exit(mtd_speedtest_exit);
MODULE_DESCRIPTION("Speed test module");
MODULE_AUTHOR("Adrian Hunter");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/tests/speedtest.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2008 Nokia Corporation
*
* Test OOB read and write on MTD device.
*
* Author: Adrian Hunter <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/div64.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/random.h>
#include "mtd_test.h"
static int dev = -EINVAL;
static int bitflip_limit;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
module_param(bitflip_limit, int, S_IRUGO);
MODULE_PARM_DESC(bitflip_limit, "Max. allowed bitflips per page");
static struct mtd_info *mtd;
static unsigned char *readbuf;
static unsigned char *writebuf;
static unsigned char *bbt;
static int ebcnt;
static int pgcnt;
static int errcnt;
static int use_offset;
static int use_len;
static int use_len_max;
static int vary_offset;
static struct rnd_state rnd_state;
static void do_vary_offset(void)
{
use_len -= 1;
if (use_len < 1) {
use_offset += 1;
if (use_offset >= use_len_max)
use_offset = 0;
use_len = use_len_max - use_offset;
}
}
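/*
 * Illustrative sketch (not part of the original module): starting from
 * (use_offset, use_len) = (0, use_len_max), the update rule above walks,
 * e.g. for use_len_max = 4, the pairs (0,4) (0,3) (0,2) (0,1) (1,3)
 * (1,2) (1,1) (2,2) (2,1) (3,1) and then wraps back to (0,4), so every
 * offset/length combination in the available OOB area gets exercised.
 */
static void __maybe_unused print_vary_offset_walk(int len_max)
{
int offs = 0, len = len_max;
int steps = len_max * (len_max + 1) / 2; /* one full period */
while (steps--) {
pr_debug("offs %d len %d\n", offs, len);
/* same update rule as do_vary_offset() */
if (--len < 1) {
if (++offs >= len_max)
offs = 0;
len = len_max - offs;
}
}
}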
static int write_eraseblock(int ebnum)
{
int i;
struct mtd_oob_ops ops = { };
int err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = use_len;
ops.oobretlen = 0;
ops.ooboffs = use_offset;
ops.datbuf = NULL;
ops.oobbuf = writebuf + (use_len_max * i) + use_offset;
err = mtd_write_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
pr_err("error: writeoob failed at %#llx\n",
(long long)addr);
pr_err("error: use_len %d, use_offset %d\n",
use_len, use_offset);
errcnt += 1;
return err ? err : -1;
}
if (vary_offset)
do_vary_offset();
}
return err;
}
static int write_whole_device(void)
{
int err;
unsigned int i;
pr_info("writing OOBs of whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = write_eraseblock(i);
if (err)
return err;
if (i % 256 == 0)
pr_info("written up to eraseblock %u\n", i);
err = mtdtest_relax();
if (err)
return err;
}
pr_info("written %u eraseblocks\n", i);
return 0;
}
/*
* Display the address, offset and data bytes at comparison failure.
* Return number of bitflips encountered.
*/
static size_t memcmpshowoffset(loff_t addr, loff_t offset, const void *cs,
const void *ct, size_t count)
{
const unsigned char *su1, *su2;
int res;
size_t i = 0;
size_t bitflips = 0;
for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--, i++) {
res = *su1 ^ *su2;
if (res) {
pr_info("error @addr[0x%lx:0x%lx] 0x%x -> 0x%x diff 0x%x\n",
(unsigned long)addr, (unsigned long)offset + i,
*su1, *su2, res);
bitflips += hweight8(res);
}
}
return bitflips;
}
#define memcmpshow(addr, cs, ct, count) memcmpshowoffset((addr), 0, (cs), (ct),\
(count))
/*
* Compare with 0xff and show the address, offset and data bytes at
* comparison failure. Return number of bitflips encountered.
*/
static size_t memffshow(loff_t addr, loff_t offset, const void *cs,
size_t count)
{
const unsigned char *su1;
int res;
size_t i = 0;
size_t bitflips = 0;
for (su1 = cs; 0 < count; ++su1, count--, i++) {
res = *su1 ^ 0xff;
if (res) {
pr_info("error @addr[0x%lx:0x%lx] 0x%x -> 0xff diff 0x%x\n",
(unsigned long)addr, (unsigned long)offset + i,
*su1, res);
bitflips += hweight8(res);
}
}
return bitflips;
}
static int verify_eraseblock(int ebnum)
{
int i;
struct mtd_oob_ops ops = { };
int err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
size_t bitflips;
prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = use_len;
ops.oobretlen = 0;
ops.ooboffs = use_offset;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
if (mtd_is_bitflip(err))
err = 0;
if (err || ops.oobretlen != use_len) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
errcnt += 1;
return err ? err : -1;
}
bitflips = memcmpshow(addr, readbuf,
writebuf + (use_len_max * i) + use_offset,
use_len);
if (bitflips > bitflip_limit) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
pr_err("error: too many errors\n");
return -1;
}
} else if (bitflips) {
pr_info("ignoring error as within bitflip_limit\n");
}
if (use_offset != 0 || use_len < mtd->oobavail) {
int k;
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->oobavail;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
if (mtd_is_bitflip(err))
err = 0;
if (err || ops.oobretlen != mtd->oobavail) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
errcnt += 1;
return err ? err : -1;
}
bitflips = memcmpshowoffset(addr, use_offset,
readbuf + use_offset,
writebuf + (use_len_max * i) + use_offset,
use_len);
/* verify pre-offset area for 0xff */
bitflips += memffshow(addr, 0, readbuf, use_offset);
/* verify post-(use_offset + use_len) area for 0xff */
k = use_offset + use_len;
bitflips += memffshow(addr, k, readbuf + k,
mtd->oobavail - k);
if (bitflips > bitflip_limit) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
pr_err("error: too many errors\n");
return -1;
}
} else if (bitflips) {
pr_info("ignoring errors as within bitflip limit\n");
}
}
if (vary_offset)
do_vary_offset();
}
return err;
}
static int verify_eraseblock_in_one_go(int ebnum)
{
struct mtd_oob_ops ops = { };
int err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
size_t len = mtd->oobavail * pgcnt;
size_t oobavail = mtd->oobavail;
size_t bitflips;
int i;
prandom_bytes_state(&rnd_state, writebuf, len);
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = len;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
/* read entire block's OOB at one go */
err = mtd_read_oob(mtd, addr, &ops);
if (mtd_is_bitflip(err))
err = 0;
if (err || ops.oobretlen != len) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
errcnt += 1;
return err ? err : -1;
}
/* verify one page OOB at a time for bitflip per page limit check */
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
bitflips = memcmpshow(addr, readbuf + (i * oobavail),
writebuf + (i * oobavail), oobavail);
if (bitflips > bitflip_limit) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
pr_err("error: too many errors\n");
return -1;
}
} else if (bitflips) {
pr_info("ignoring error as within bitflip_limit\n");
}
}
return err;
}
static int verify_all_eraseblocks(void)
{
int err;
unsigned int i;
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = verify_eraseblock(i);
if (err)
return err;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
err = mtdtest_relax();
if (err)
return err;
}
pr_info("verified %u eraseblocks\n", i);
return 0;
}
static int __init mtd_oobtest_init(void)
{
int err = 0;
unsigned int i;
uint64_t tmp;
struct mtd_oob_ops ops = { };
loff_t addr = 0, addr0;
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
pr_info("Please specify a valid mtd-device via module parameter\n");
pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
pr_err("error: cannot get MTD device\n");
return err;
}
if (!mtd_type_is_nand(mtd)) {
pr_info("this test requires NAND flash\n");
goto out;
}
tmp = mtd->size;
do_div(tmp, mtd->erasesize);
ebcnt = tmp;
pgcnt = mtd->erasesize / mtd->writesize;
pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
mtd->writesize, ebcnt, pgcnt, mtd->oobsize);
err = -ENOMEM;
readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!readbuf)
goto out;
writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!writebuf)
goto out;
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt)
goto out;
err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
use_offset = 0;
use_len = mtd->oobavail;
use_len_max = mtd->oobavail;
vary_offset = 0;
/* First test: write all OOB, read it back and verify */
pr_info("test 1 of 5\n");
err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
prandom_seed_state(&rnd_state, 1);
err = write_whole_device();
if (err)
goto out;
prandom_seed_state(&rnd_state, 1);
err = verify_all_eraseblocks();
if (err)
goto out;
/*
* Second test: write all OOB, a block at a time, read it back and
* verify.
*/
pr_info("test 2 of 5\n");
err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
prandom_seed_state(&rnd_state, 3);
err = write_whole_device();
if (err)
goto out;
/* Check all eraseblocks */
prandom_seed_state(&rnd_state, 3);
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = verify_eraseblock_in_one_go(i);
if (err)
goto out;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
err = mtdtest_relax();
if (err)
goto out;
}
pr_info("verified %u eraseblocks\n", i);
/*
* Third test: write OOB at varying offsets and lengths, read it back
* and verify.
*/
pr_info("test 3 of 5\n");
err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
/* Write all eraseblocks */
use_offset = 0;
use_len = mtd->oobavail;
use_len_max = mtd->oobavail;
vary_offset = 1;
prandom_seed_state(&rnd_state, 5);
err = write_whole_device();
if (err)
goto out;
/* Check all eraseblocks */
use_offset = 0;
use_len = mtd->oobavail;
use_len_max = mtd->oobavail;
vary_offset = 1;
prandom_seed_state(&rnd_state, 5);
err = verify_all_eraseblocks();
if (err)
goto out;
use_offset = 0;
use_len = mtd->oobavail;
use_len_max = mtd->oobavail;
vary_offset = 0;
/* Fourth test: try to write off end of device */
pr_info("test 4 of 5\n");
err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
addr0 = 0;
for (i = 0; i < ebcnt && bbt[i]; ++i)
addr0 += mtd->erasesize;
/* Attempt to write off end of OOB */
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = 1;
ops.oobretlen = 0;
ops.ooboffs = mtd->oobavail;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
pr_info("attempting to start write past end of OOB\n");
pr_info("an error is expected...\n");
err = mtd_write_oob(mtd, addr0, &ops);
if (err) {
pr_info("error occurred as expected\n");
} else {
pr_err("error: can write past end of OOB\n");
errcnt += 1;
}
/* Attempt to read off end of OOB */
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = 1;
ops.oobretlen = 0;
ops.ooboffs = mtd->oobavail;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
pr_info("attempting to start read past end of OOB\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, addr0, &ops);
if (mtd_is_bitflip(err))
err = 0;
if (err) {
pr_info("error occurred as expected\n");
} else {
pr_err("error: can read past end of OOB\n");
errcnt += 1;
}
if (bbt[ebcnt - 1])
pr_info("skipping end of device tests because last block is bad\n");
else {
/* Attempt to write off end of device */
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->oobavail + 1;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
pr_info("attempting to write past end of device\n");
pr_info("an error is expected...\n");
err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
pr_info("error occurred as expected\n");
} else {
pr_err("error: wrote past end of device\n");
errcnt += 1;
}
/* Attempt to read off end of device */
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->oobavail + 1;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
pr_info("attempting to read past end of device\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (mtd_is_bitflip(err))
err = 0;
if (err) {
pr_info("error occurred as expected\n");
} else {
pr_err("error: read past end of device\n");
errcnt += 1;
}
err = mtdtest_erase_eraseblock(mtd, ebcnt - 1);
if (err)
goto out;
/* Attempt to write off end of device */
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->oobavail;
ops.oobretlen = 0;
ops.ooboffs = 1;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
pr_info("attempting to write past end of device\n");
pr_info("an error is expected...\n");
err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
pr_info("error occurred as expected\n");
} else {
pr_err("error: wrote past end of device\n");
errcnt += 1;
}
/* Attempt to read off end of device */
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->oobavail;
ops.oobretlen = 0;
ops.ooboffs = 1;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
pr_info("attempting to read past end of device\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (mtd_is_bitflip(err))
err = 0;
if (err) {
pr_info("error occurred as expected\n");
} else {
pr_err("error: read past end of device\n");
errcnt += 1;
}
}
/* Fifth test: write / read across block boundaries */
pr_info("test 5 of 5\n");
/* Erase all eraseblocks */
err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
/* Write all eraseblocks */
prandom_seed_state(&rnd_state, 11);
pr_info("writing OOBs of whole device\n");
for (i = 0; i < ebcnt - 1; ++i) {
int cnt = 2;
int pg;
size_t sz = mtd->oobavail;
if (bbt[i] || bbt[i + 1])
continue;
addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
prandom_bytes_state(&rnd_state, writebuf, sz * cnt);
for (pg = 0; pg < cnt; ++pg) {
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = sz;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = writebuf + pg * sz;
err = mtd_write_oob(mtd, addr, &ops);
if (err)
goto out;
if (i % 256 == 0)
pr_info("written up to eraseblock %u\n", i);
err = mtdtest_relax();
if (err)
goto out;
addr += mtd->writesize;
}
}
pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
prandom_seed_state(&rnd_state, 11);
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt - 1; ++i) {
if (bbt[i] || bbt[i + 1])
continue;
prandom_bytes_state(&rnd_state, writebuf, mtd->oobavail * 2);
addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
ops.ooblen = mtd->oobavail * 2;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
if (mtd_is_bitflip(err))
err = 0;
if (err)
goto out;
if (memcmpshow(addr, readbuf, writebuf,
mtd->oobavail * 2)) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
if (errcnt > 1000) {
err = -EINVAL;
pr_err("error: too many errors\n");
goto out;
}
}
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
err = mtdtest_relax();
if (err)
goto out;
}
pr_info("verified %u eraseblocks\n", i);
pr_info("finished with %d errors\n", errcnt);
out:
kfree(bbt);
kfree(writebuf);
kfree(readbuf);
put_mtd_device(mtd);
if (err)
pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
module_init(mtd_oobtest_init);
static void __exit mtd_oobtest_exit(void)
{
return;
}
module_exit(mtd_oobtest_exit);
MODULE_DESCRIPTION("Out-of-band test module");
MODULE_AUTHOR("Adrian Hunter");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/tests/oobtest.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2007 Nokia Corporation
*
* Test sub-page read and write on MTD device.
* Author: Adrian Hunter <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/random.h>
#include "mtd_test.h"
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
static struct mtd_info *mtd;
static unsigned char *writebuf;
static unsigned char *readbuf;
static unsigned char *bbt;
static int subpgsize;
static int bufsize;
static int ebcnt;
static int pgcnt;
static int errcnt;
static struct rnd_state rnd_state;
static inline void clear_data(unsigned char *buf, size_t len)
{
memset(buf, 0, len);
}
static int write_eraseblock(int ebnum)
{
size_t written;
int err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
prandom_bytes_state(&rnd_state, writebuf, subpgsize);
err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
pr_err("error: write failed at %#llx\n",
(long long)addr);
if (written != subpgsize) {
pr_err(" write size: %#x\n", subpgsize);
pr_err(" written: %#zx\n", written);
}
return err ? err : -1;
}
addr += subpgsize;
prandom_bytes_state(&rnd_state, writebuf, subpgsize);
err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
pr_err("error: write failed at %#llx\n",
(long long)addr);
if (written != subpgsize) {
pr_err(" write size: %#x\n", subpgsize);
pr_err(" written: %#zx\n", written);
}
return err ? err : -1;
}
return err;
}
static int write_eraseblock2(int ebnum)
{
size_t written;
int err = 0, k;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
for (k = 1; k < 33; ++k) {
if (addr + (subpgsize * k) > (loff_t)(ebnum + 1) * mtd->erasesize)
break;
prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
if (unlikely(err || written != subpgsize * k)) {
pr_err("error: write failed at %#llx\n",
(long long)addr);
if (written != subpgsize * k) {
pr_err(" write size: %#x\n",
subpgsize * k);
pr_err(" written: %#08zx\n",
written);
}
return err ? err : -1;
}
addr += subpgsize * k;
}
return err;
}
static void print_subpage(unsigned char *p)
{
int i, j;
for (i = 0; i < subpgsize; ) {
for (j = 0; i < subpgsize && j < 32; ++i, ++j)
printk("%02x", *p++);
printk("\n");
}
}
static int verify_eraseblock(int ebnum)
{
size_t read;
int err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
prandom_bytes_state(&rnd_state, writebuf, subpgsize);
clear_data(readbuf, subpgsize);
err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
pr_err("error: read failed at %#llx\n",
(long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
pr_info("------------- written----------------\n");
print_subpage(writebuf);
pr_info("------------- read ------------------\n");
print_subpage(readbuf);
pr_info("-------------------------------------\n");
errcnt += 1;
}
addr += subpgsize;
prandom_bytes_state(&rnd_state, writebuf, subpgsize);
clear_data(readbuf, subpgsize);
err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
pr_err("error: read failed at %#llx\n",
(long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
pr_info("------------- written----------------\n");
print_subpage(writebuf);
pr_info("------------- read ------------------\n");
print_subpage(readbuf);
pr_info("-------------------------------------\n");
errcnt += 1;
}
return err;
}
static int verify_eraseblock2(int ebnum)
{
size_t read;
int err = 0, k;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
for (k = 1; k < 33; ++k) {
if (addr + (subpgsize * k) > (loff_t)(ebnum + 1) * mtd->erasesize)
break;
prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
clear_data(readbuf, subpgsize * k);
err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf);
if (unlikely(err || read != subpgsize * k)) {
if (mtd_is_bitflip(err) && read == subpgsize * k) {
pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
pr_err("error: read failed at "
"%#llx\n", (long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize * k))) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
}
addr += subpgsize * k;
}
return err;
}
static int verify_eraseblock_ff(int ebnum)
{
uint32_t j;
size_t read;
int err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
memset(writebuf, 0xff, subpgsize);
for (j = 0; j < mtd->erasesize / subpgsize; ++j) {
clear_data(readbuf, subpgsize);
err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
pr_info("ECC correction at %#llx\n",
(long long)addr);
err = 0;
} else {
pr_err("error: read failed at "
"%#llx\n", (long long)addr);
return err ? err : -1;
}
}
if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
pr_err("error: verify 0xff failed at "
"%#llx\n", (long long)addr);
errcnt += 1;
}
addr += subpgsize;
}
return err;
}
static int verify_all_eraseblocks_ff(void)
{
int err;
unsigned int i;
pr_info("verifying all eraseblocks for 0xff\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = verify_eraseblock_ff(i);
if (err)
return err;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
err = mtdtest_relax();
if (err)
return err;
}
pr_info("verified %u eraseblocks\n", i);
return 0;
}
static int __init mtd_subpagetest_init(void)
{
int err = 0;
uint32_t i;
uint64_t tmp;
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
pr_info("Please specify a valid mtd-device via module parameter\n");
pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
pr_err("error: cannot get MTD device\n");
return err;
}
if (!mtd_type_is_nand(mtd)) {
pr_info("this test requires NAND flash\n");
goto out;
}
subpgsize = mtd->writesize >> mtd->subpage_sft;
tmp = mtd->size;
do_div(tmp, mtd->erasesize);
ebcnt = tmp;
pgcnt = mtd->erasesize / mtd->writesize;
pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, subpage size %u, count of eraseblocks %u, "
"pages per eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
mtd->writesize, subpgsize, ebcnt, pgcnt, mtd->oobsize);
err = -ENOMEM;
bufsize = subpgsize * 32;
writebuf = kmalloc(bufsize, GFP_KERNEL);
if (!writebuf)
goto out;
readbuf = kmalloc(bufsize, GFP_KERNEL);
if (!readbuf)
goto out;
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt)
goto out;
err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
pr_info("writing whole device\n");
prandom_seed_state(&rnd_state, 1);
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = write_eraseblock(i);
if (unlikely(err))
goto out;
if (i % 256 == 0)
pr_info("written up to eraseblock %u\n", i);
err = mtdtest_relax();
if (err)
goto out;
}
pr_info("written %u eraseblocks\n", i);
prandom_seed_state(&rnd_state, 1);
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = verify_eraseblock(i);
if (unlikely(err))
goto out;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
err = mtdtest_relax();
if (err)
goto out;
}
pr_info("verified %u eraseblocks\n", i);
err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
err = verify_all_eraseblocks_ff();
if (err)
goto out;
/* Write all eraseblocks */
prandom_seed_state(&rnd_state, 3);
pr_info("writing whole device\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = write_eraseblock2(i);
if (unlikely(err))
goto out;
if (i % 256 == 0)
pr_info("written up to eraseblock %u\n", i);
err = mtdtest_relax();
if (err)
goto out;
}
pr_info("written %u eraseblocks\n", i);
/* Check all eraseblocks */
prandom_seed_state(&rnd_state, 3);
pr_info("verifying all eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
if (bbt[i])
continue;
err = verify_eraseblock2(i);
if (unlikely(err))
goto out;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
err = mtdtest_relax();
if (err)
goto out;
}
pr_info("verified %u eraseblocks\n", i);
err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
err = verify_all_eraseblocks_ff();
if (err)
goto out;
pr_info("finished with %d errors\n", errcnt);
out:
kfree(bbt);
kfree(readbuf);
kfree(writebuf);
put_mtd_device(mtd);
if (err)
pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
module_init(mtd_subpagetest_init);
static void __exit mtd_subpagetest_exit(void)
{
return;
}
module_exit(mtd_subpagetest_exit);
MODULE_DESCRIPTION("Subpage test module");
MODULE_AUTHOR("Adrian Hunter");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/tests/subpagetest.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2008 Nokia Corporation
*
* Test random reads, writes and erases on MTD device.
*
* Author: Adrian Hunter <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include "mtd_test.h"
static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
static int count = 10000;
module_param(count, int, S_IRUGO);
MODULE_PARM_DESC(count, "Number of operations to do (default is 10000)");
static struct mtd_info *mtd;
static unsigned char *writebuf;
static unsigned char *readbuf;
static unsigned char *bbt;
static int *offsets;
static int pgsize;
static int bufsize;
static int ebcnt;
static int pgcnt;
static int rand_eb(void)
{
unsigned int eb;
again:
/* Read or write up to 2 eraseblocks at a time - hence 'ebcnt - 1' */
eb = get_random_u32_below(ebcnt - 1);
if (bbt[eb])
goto again;
return eb;
}
static int rand_offs(void)
{
return get_random_u32_below(bufsize);
}
static int rand_len(int offs)
{
return get_random_u32_below(bufsize - offs);
}
static int do_read(void)
{
int eb = rand_eb();
int offs = rand_offs();
int len = rand_len(offs);
loff_t addr;
if (bbt[eb + 1]) {
if (offs >= mtd->erasesize)
offs -= mtd->erasesize;
if (offs + len > mtd->erasesize)
len = mtd->erasesize - offs;
}
addr = (loff_t)eb * mtd->erasesize + offs;
return mtdtest_read(mtd, addr, len, readbuf);
}
static int do_write(void)
{
int eb = rand_eb(), offs, err, len;
loff_t addr;
offs = offsets[eb];
if (offs >= mtd->erasesize) {
err = mtdtest_erase_eraseblock(mtd, eb);
if (err)
return err;
offs = offsets[eb] = 0;
}
len = rand_len(offs);
len = ((len + pgsize - 1) / pgsize) * pgsize;
if (offs + len > mtd->erasesize) {
if (bbt[eb + 1])
len = mtd->erasesize - offs;
else {
err = mtdtest_erase_eraseblock(mtd, eb + 1);
if (err)
return err;
offsets[eb + 1] = 0;
}
}
addr = (loff_t)eb * mtd->erasesize + offs;
err = mtdtest_write(mtd, addr, len, writebuf);
if (unlikely(err))
return err;
offs += len;
while (offs > mtd->erasesize) {
offsets[eb++] = mtd->erasesize;
offs -= mtd->erasesize;
}
offsets[eb] = offs;
return 0;
}
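/*
 * Illustrative sketch (not part of the original module): do_write()
 * rounds the random length up to a whole number of pages with
 * ((len + pgsize - 1) / pgsize) * pgsize; the kernel's round_up()
 * macro expresses the same thing when pgsize is a power of two.
 */
static inline int __maybe_unused round_len_to_pages(int len, int pg)
{
/* e.g. len = 700, pg = 512 -> 1024 (two whole pages) */
return ((len + pg - 1) / pg) * pg;
}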
static int do_operation(void)
{
if (get_random_u32_below(2))
return do_read();
else
return do_write();
}
static int __init mtd_stresstest_init(void)
{
int err;
int i, op;
uint64_t tmp;
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
if (dev < 0) {
pr_info("Please specify a valid mtd-device via module parameter\n");
pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
return -EINVAL;
}
pr_info("MTD device: %d\n", dev);
mtd = get_mtd_device(NULL, dev);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
pr_err("error: cannot get MTD device\n");
return err;
}
if (mtd->writesize == 1) {
pr_info("not NAND flash, assume page size is 512 bytes.\n");
pgsize = 512;
} else
pgsize = mtd->writesize;
tmp = mtd->size;
do_div(tmp, mtd->erasesize);
ebcnt = tmp;
pgcnt = mtd->erasesize / pgsize;
pr_info("MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
"eraseblock %u, OOB size %u\n",
(unsigned long long)mtd->size, mtd->erasesize,
pgsize, ebcnt, pgcnt, mtd->oobsize);
if (ebcnt < 2) {
pr_err("error: need at least 2 eraseblocks\n");
err = -ENOSPC;
goto out_put_mtd;
}
/* Read or write up to 2 eraseblocks at a time */
bufsize = mtd->erasesize * 2;
err = -ENOMEM;
readbuf = vmalloc(bufsize);
writebuf = vmalloc(bufsize);
offsets = kmalloc_array(ebcnt, sizeof(int), GFP_KERNEL);
if (!readbuf || !writebuf || !offsets)
goto out;
for (i = 0; i < ebcnt; i++)
offsets[i] = mtd->erasesize;
get_random_bytes(writebuf, bufsize);
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt)
goto out;
err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
if (err)
goto out;
/* Do operations */
pr_info("doing operations\n");
for (op = 0; op < count; op++) {
if ((op & 1023) == 0)
pr_info("%d operations done\n", op);
err = do_operation();
if (err)
goto out;
err = mtdtest_relax();
if (err)
goto out;
}
pr_info("finished, %d operations done\n", op);
out:
kfree(offsets);
kfree(bbt);
vfree(writebuf);
vfree(readbuf);
out_put_mtd:
put_mtd_device(mtd);
if (err)
pr_info("error %d occurred\n", err);
printk(KERN_INFO "=================================================\n");
return err;
}
module_init(mtd_stresstest_init);
static void __exit mtd_stresstest_exit(void)
{
return;
}
module_exit(mtd_stresstest_exit);
MODULE_DESCRIPTION("Stress test module");
MODULE_AUTHOR("Adrian Hunter");
MODULE_LICENSE("GPL");
| linux-master | drivers/mtd/tests/stresstest.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* MTK ECC controller driver.
* Copyright (C) 2016 MediaTek Inc.
* Authors: Xiaolei Li <[email protected]>
* Jorge Ramirez-Ortiz <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mutex.h>
#include <linux/mtd/nand-ecc-mtk.h>
#define ECC_IDLE_MASK BIT(0)
#define ECC_IRQ_EN BIT(0)
#define ECC_PG_IRQ_SEL BIT(1)
#define ECC_OP_ENABLE (1)
#define ECC_OP_DISABLE (0)
#define ECC_ENCCON (0x00)
#define ECC_ENCCNFG (0x04)
#define ECC_MS_SHIFT (16)
#define ECC_ENCDIADDR (0x08)
#define ECC_ENCIDLE (0x0C)
#define ECC_DECCON (0x100)
#define ECC_DECCNFG (0x104)
#define DEC_EMPTY_EN BIT(31)
#define DEC_CNFG_CORRECT (0x3 << 12)
#define ECC_DECIDLE (0x10C)
#define ECC_DECENUM0 (0x114)
#define ECC_TIMEOUT (500000)
#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
#define ECC_ERRMASK_MT7622 GENMASK(4, 0)
#define ECC_ERRMASK_MT2701 GENMASK(5, 0)
#define ECC_ERRMASK_MT2712 GENMASK(6, 0)
struct mtk_ecc_caps {
u32 err_mask;
u32 err_shift;
const u8 *ecc_strength;
const u32 *ecc_regs;
u8 num_ecc_strength;
u8 ecc_mode_shift;
u32 parity_bits;
int pg_irq_sel;
};
struct mtk_ecc {
struct device *dev;
const struct mtk_ecc_caps *caps;
void __iomem *regs;
struct clk *clk;
struct completion done;
struct mutex lock;
u32 sectors;
u8 *eccdata;
};
/* ecc strength that each IP supports */
static const u8 ecc_strength_mt2701[] = {
4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
40, 44, 48, 52, 56, 60
};
static const u8 ecc_strength_mt2712[] = {
4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
40, 44, 48, 52, 56, 60, 68, 72, 80
};
static const u8 ecc_strength_mt7622[] = {
4, 6, 8, 10, 12
};
static const u8 ecc_strength_mt7986[] = {
4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24
};
enum mtk_ecc_regs {
ECC_ENCPAR00,
ECC_ENCIRQ_EN,
ECC_ENCIRQ_STA,
ECC_DECDONE,
ECC_DECIRQ_EN,
ECC_DECIRQ_STA,
};
static const u32 mt2701_ecc_regs[] = {
[ECC_ENCPAR00] = 0x10,
[ECC_ENCIRQ_EN] = 0x80,
[ECC_ENCIRQ_STA] = 0x84,
[ECC_DECDONE] = 0x124,
[ECC_DECIRQ_EN] = 0x200,
[ECC_DECIRQ_STA] = 0x204,
};
static const u32 mt2712_ecc_regs[] = {
[ECC_ENCPAR00] = 0x300,
[ECC_ENCIRQ_EN] = 0x80,
[ECC_ENCIRQ_STA] = 0x84,
[ECC_DECDONE] = 0x124,
[ECC_DECIRQ_EN] = 0x200,
[ECC_DECIRQ_STA] = 0x204,
};
static const u32 mt7622_ecc_regs[] = {
[ECC_ENCPAR00] = 0x10,
[ECC_ENCIRQ_EN] = 0x30,
[ECC_ENCIRQ_STA] = 0x34,
[ECC_DECDONE] = 0x11c,
[ECC_DECIRQ_EN] = 0x140,
[ECC_DECIRQ_STA] = 0x144,
};
static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
enum mtk_ecc_operation op)
{
struct device *dev = ecc->dev;
u32 val;
int ret;
ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val,
val & ECC_IDLE_MASK,
10, ECC_TIMEOUT);
if (ret)
dev_warn(dev, "%s NOT idle\n",
op == ECC_ENCODE ? "encoder" : "decoder");
}
static irqreturn_t mtk_ecc_irq(int irq, void *id)
{
struct mtk_ecc *ecc = id;
u32 dec, enc;
dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA])
& ECC_IRQ_EN;
if (dec) {
dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
if (dec & ecc->sectors) {
/*
* Clear decode IRQ status once again to ensure that
* there will be no extra IRQ.
*/
readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]);
ecc->sectors = 0;
complete(&ecc->done);
} else {
return IRQ_HANDLED;
}
} else {
enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA])
& ECC_IRQ_EN;
if (enc)
complete(&ecc->done);
else
return IRQ_NONE;
}
return IRQ_HANDLED;
}
static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
{
u32 ecc_bit, dec_sz, enc_sz;
u32 reg, i;
for (i = 0; i < ecc->caps->num_ecc_strength; i++) {
if (ecc->caps->ecc_strength[i] == config->strength)
break;
}
if (i == ecc->caps->num_ecc_strength) {
dev_err(ecc->dev, "invalid ecc strength %d\n",
config->strength);
return -EINVAL;
}
ecc_bit = i;
if (config->op == ECC_ENCODE) {
/* configure ECC encoder (in bits) */
enc_sz = config->len << 3;
reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
reg |= (enc_sz << ECC_MS_SHIFT);
writel(reg, ecc->regs + ECC_ENCCNFG);
if (config->mode != ECC_NFI_MODE)
writel(lower_32_bits(config->addr),
ecc->regs + ECC_ENCDIADDR);
} else {
/* configure ECC decoder (in bits) */
dec_sz = (config->len << 3) +
config->strength * ecc->caps->parity_bits;
reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
reg |= DEC_EMPTY_EN;
writel(reg, ecc->regs + ECC_DECCNFG);
if (config->sectors)
ecc->sectors = 1 << (config->sectors - 1);
}
return 0;
}
void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
int sectors)
{
u32 offset, i, err;
u32 bitflips = 0;
stats->corrected = 0;
stats->failed = 0;
for (i = 0; i < sectors; i++) {
offset = (i >> 2) << 2;
err = readl(ecc->regs + ECC_DECENUM0 + offset);
err = err >> ((i % 4) * ecc->caps->err_shift);
err &= ecc->caps->err_mask;
if (err == ecc->caps->err_mask) {
/* uncorrectable errors */
stats->failed++;
continue;
}
stats->corrected += err;
bitflips = max_t(u32, bitflips, err);
}
stats->bitflips = bitflips;
}
EXPORT_SYMBOL(mtk_ecc_get_stats);
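/*
 * Illustrative sketch (not part of the original driver): the ECC_DECENUM0
 * register bank packs one error counter per sector, four sectors per
 * 32-bit register, each counter err_shift bits wide. With err_shift = 8
 * and err_mask = 0x7f, sector 5 therefore lives at register offset
 * ((5 >> 2) << 2) = 4 and bit position (5 % 4) * 8 = 8.
 */
static inline u32 __maybe_unused mtk_ecc_sector_errs_sketch(u32 regval,
u32 sector, u32 err_shift, u32 err_mask)
{
return (regval >> ((sector % 4) * err_shift)) & err_mask;
}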
void mtk_ecc_release(struct mtk_ecc *ecc)
{
clk_disable_unprepare(ecc->clk);
put_device(ecc->dev);
}
EXPORT_SYMBOL(mtk_ecc_release);
static void mtk_ecc_hw_init(struct mtk_ecc *ecc)
{
mtk_ecc_wait_idle(ecc, ECC_ENCODE);
writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON);
mtk_ecc_wait_idle(ecc, ECC_DECODE);
writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON);
}
static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
{
struct platform_device *pdev;
struct mtk_ecc *ecc;
pdev = of_find_device_by_node(np);
if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
ecc = platform_get_drvdata(pdev);
if (!ecc) {
put_device(&pdev->dev);
return ERR_PTR(-EPROBE_DEFER);
}
clk_prepare_enable(ecc->clk);
mtk_ecc_hw_init(ecc);
return ecc;
}
struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node)
{
struct mtk_ecc *ecc = NULL;
struct device_node *np;
np = of_parse_phandle(of_node, "nand-ecc-engine", 0);
/* for backward compatibility */
if (!np)
np = of_parse_phandle(of_node, "ecc-engine", 0);
if (np) {
ecc = mtk_ecc_get(np);
of_node_put(np);
}
return ecc;
}
EXPORT_SYMBOL(of_mtk_ecc_get);
int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
{
enum mtk_ecc_operation op = config->op;
u16 reg_val;
int ret;
ret = mutex_lock_interruptible(&ecc->lock);
if (ret) {
dev_err(ecc->dev, "interrupted when attempting to lock\n");
return ret;
}
mtk_ecc_wait_idle(ecc, op);
ret = mtk_ecc_config(ecc, config);
if (ret) {
mutex_unlock(&ecc->lock);
return ret;
}
if (config->mode != ECC_NFI_MODE || op != ECC_ENCODE) {
init_completion(&ecc->done);
reg_val = ECC_IRQ_EN;
/*
* For ECC_NFI_MODE, if ecc->caps->pg_irq_sel is 1, the chip can
* only generate one ECC irq per page read / write. If it is 0,
* one ECC irq is generated for each ECC step.
*/
if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE)
reg_val |= ECC_PG_IRQ_SEL;
if (op == ECC_ENCODE)
writew(reg_val, ecc->regs +
ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
else
writew(reg_val, ecc->regs +
ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
}
writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
return 0;
}
EXPORT_SYMBOL(mtk_ecc_enable);
void mtk_ecc_disable(struct mtk_ecc *ecc)
{
enum mtk_ecc_operation op = ECC_ENCODE;
/* find out the running operation */
if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE)
op = ECC_DECODE;
/* disable it */
mtk_ecc_wait_idle(ecc, op);
if (op == ECC_DECODE) {
/*
* Clear decode IRQ status in case there is a timeout to wait
* decode IRQ.
*/
readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
} else {
writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
}
writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
mutex_unlock(&ecc->lock);
}
EXPORT_SYMBOL(mtk_ecc_disable);
int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op)
{
int ret;
ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500));
if (!ret) {
dev_err(ecc->dev, "%s timeout - interrupt did not arrive)\n",
(op == ECC_ENCODE) ? "encoder" : "decoder");
return -ETIMEDOUT;
}
return 0;
}
EXPORT_SYMBOL(mtk_ecc_wait_done);
int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
u8 *data, u32 bytes)
{
dma_addr_t addr;
u32 len;
int ret;
addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
ret = dma_mapping_error(ecc->dev, addr);
if (ret) {
dev_err(ecc->dev, "dma mapping error\n");
return -EINVAL;
}
config->op = ECC_ENCODE;
config->addr = addr;
ret = mtk_ecc_enable(ecc, config);
if (ret) {
dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
return ret;
}
ret = mtk_ecc_wait_done(ecc, ECC_ENCODE);
if (ret)
goto timeout;
mtk_ecc_wait_idle(ecc, ECC_ENCODE);
/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
/* write the parity bytes generated by the ECC back to temp buffer */
__ioread32_copy(ecc->eccdata,
ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00],
round_up(len, 4));
/* copy into possibly unaligned OOB region with actual length */
memcpy(data + bytes, ecc->eccdata, len);
timeout:
dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
mtk_ecc_disable(ecc);
return ret;
}
EXPORT_SYMBOL(mtk_ecc_encode);
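/*
 * Illustrative sketch (not part of the original driver): the parity
 * length computed in mtk_ecc_encode() is the classic ceiling division
 * DIV_ROUND_UP(strength * parity_bits, 8). E.g. strength = 12 with 14
 * parity bits per correctable error gives (12 * 14 + 7) >> 3 = 21 ECC
 * bytes per sector.
 */
static inline u32 __maybe_unused mtk_ecc_parity_bytes_sketch(u32 strength,
u32 parity_bits)
{
return DIV_ROUND_UP(strength * parity_bits, 8);
}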
void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p)
{
const u8 *ecc_strength = ecc->caps->ecc_strength;
int i;
for (i = 0; i < ecc->caps->num_ecc_strength; i++) {
if (*p <= ecc_strength[i]) {
if (!i)
*p = ecc_strength[i];
else if (*p != ecc_strength[i])
*p = ecc_strength[i - 1];
return;
}
}
*p = ecc_strength[ecc->caps->num_ecc_strength - 1];
}
EXPORT_SYMBOL(mtk_ecc_adjust_strength);
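/*
 * Illustrative usage (not part of the original driver): callers pass the
 * strength they would like and get back the closest strength the engine
 * supports, rounded down; requests below the minimum are rounded up and
 * requests above the maximum are clamped. On MT7622 (4/6/8/10/12) a
 * request for 7 becomes 6, for 3 becomes 4, and for 100 becomes 12.
 */
static void __maybe_unused mtk_ecc_adjust_strength_demo(struct mtk_ecc *ecc)
{
u32 strength = 7;
mtk_ecc_adjust_strength(ecc, &strength); /* e.g. 7 -> 6 on MT7622 */
pr_debug("adjusted ECC strength: %u\n", strength);
}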
unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc)
{
return ecc->caps->parity_bits;
}
EXPORT_SYMBOL(mtk_ecc_get_parity_bits);
static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
.err_mask = ECC_ERRMASK_MT2701,
.err_shift = 8,
.ecc_strength = ecc_strength_mt2701,
.ecc_regs = mt2701_ecc_regs,
.num_ecc_strength = 20,
.ecc_mode_shift = 5,
.parity_bits = 14,
.pg_irq_sel = 0,
};
static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
.err_mask = ECC_ERRMASK_MT2712,
.err_shift = 8,
.ecc_strength = ecc_strength_mt2712,
.ecc_regs = mt2712_ecc_regs,
.num_ecc_strength = 23,
.ecc_mode_shift = 5,
.parity_bits = 14,
.pg_irq_sel = 1,
};
static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
.err_mask = ECC_ERRMASK_MT7622,
.err_shift = 5,
.ecc_strength = ecc_strength_mt7622,
.ecc_regs = mt7622_ecc_regs,
.num_ecc_strength = 5,
.ecc_mode_shift = 4,
.parity_bits = 13,
.pg_irq_sel = 0,
};
static const struct mtk_ecc_caps mtk_ecc_caps_mt7986 = {
.err_mask = ECC_ERRMASK_MT7622,
.err_shift = 8,
.ecc_strength = ecc_strength_mt7986,
.ecc_regs = mt2712_ecc_regs,
.num_ecc_strength = 11,
.ecc_mode_shift = 5,
.parity_bits = 14,
.pg_irq_sel = 1,
};
static const struct of_device_id mtk_ecc_dt_match[] = {
{
.compatible = "mediatek,mt2701-ecc",
.data = &mtk_ecc_caps_mt2701,
}, {
.compatible = "mediatek,mt2712-ecc",
.data = &mtk_ecc_caps_mt2712,
}, {
.compatible = "mediatek,mt7622-ecc",
.data = &mtk_ecc_caps_mt7622,
}, {
.compatible = "mediatek,mt7986-ecc",
.data = &mtk_ecc_caps_mt7986,
},
{},
};
static int mtk_ecc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_ecc *ecc;
u32 max_eccdata_size;
int irq, ret;
ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
if (!ecc)
return -ENOMEM;
ecc->caps = of_device_get_match_data(dev);
max_eccdata_size = ecc->caps->num_ecc_strength - 1;
max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size];
max_eccdata_size = (max_eccdata_size * ecc->caps->parity_bits + 7) >> 3;
max_eccdata_size = round_up(max_eccdata_size, 4);
ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL);
if (!ecc->eccdata)
return -ENOMEM;
ecc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ecc->regs))
return PTR_ERR(ecc->regs);
ecc->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ecc->clk)) {
dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
return PTR_ERR(ecc->clk);
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = dma_set_mask(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "failed to set DMA mask\n");
return ret;
}
ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc);
if (ret) {
dev_err(dev, "failed to request irq\n");
return -EINVAL;
}
ecc->dev = dev;
mutex_init(&ecc->lock);
platform_set_drvdata(pdev, ecc);
dev_info(dev, "probed\n");
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int mtk_ecc_suspend(struct device *dev)
{
struct mtk_ecc *ecc = dev_get_drvdata(dev);
clk_disable_unprepare(ecc->clk);
return 0;
}
static int mtk_ecc_resume(struct device *dev)
{
struct mtk_ecc *ecc = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(ecc->clk);
if (ret) {
dev_err(dev, "failed to enable clk\n");
return ret;
}
return 0;
}
static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
#endif
MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);
static struct platform_driver mtk_ecc_driver = {
.probe = mtk_ecc_probe,
.driver = {
.name = "mtk-ecc",
.of_match_table = mtk_ecc_dt_match,
#ifdef CONFIG_PM_SLEEP
.pm = &mtk_ecc_pm_ops,
#endif
},
};
module_platform_driver(mtk_ecc_driver);
MODULE_AUTHOR("Xiaolei Li <[email protected]>");
MODULE_DESCRIPTION("MTK Nand ECC Driver");
MODULE_LICENSE("Dual MIT/GPL");
| linux-master | drivers/mtd/nand/ecc-mtk.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* This file provides ECC correction for more than 1 bit per block of data,
* using binary BCH codes. It relies on the generic BCH library lib/bch.c.
*
* Copyright © 2011 Ivan Djelic <[email protected]>
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand-ecc-sw-bch.h>
/**
* nand_ecc_sw_bch_calculate - Calculate the ECC corresponding to a data block
* @nand: NAND device
* @buf: Input buffer with raw data
* @code: Output buffer with ECC
*/
int nand_ecc_sw_bch_calculate(struct nand_device *nand,
const unsigned char *buf, unsigned char *code)
{
struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
unsigned int i;
memset(code, 0, engine_conf->code_size);
bch_encode(engine_conf->bch, buf, nand->ecc.ctx.conf.step_size, code);
/* apply mask so that an erased page is a valid codeword */
for (i = 0; i < engine_conf->code_size; i++)
code[i] ^= engine_conf->eccmask[i];
return 0;
}
EXPORT_SYMBOL(nand_ecc_sw_bch_calculate);
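/*
 * Illustrative sketch (not part of the original engine): bch_encode()
 * over all-0xff data does not produce all-0xff parity, so a freshly
 * erased page (data and OOB both 0xff) would otherwise fail ECC. The
 * eccmask XOR above is the inverted parity of an erased step, computed
 * once in nand_ecc_sw_bch_init(), so for erased data the masked code
 * comes out as all 0xff - exactly what sits in an erased OOB area.
 */
static bool __maybe_unused erased_code_is_ff_sketch(struct nand_device *nand,
u8 *step_buf, u8 *code_buf)
{
struct nand_ecc_sw_bch_conf *conf = nand->ecc.ctx.priv;
unsigned int i;
memset(step_buf, 0xff, nand->ecc.ctx.conf.step_size);
nand_ecc_sw_bch_calculate(nand, step_buf, code_buf);
for (i = 0; i < conf->code_size; i++)
if (code_buf[i] != 0xff)
return false;
return true; /* holds by construction of eccmask */
}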
/**
* nand_ecc_sw_bch_correct - Detect, correct and report bit error(s)
* @nand: NAND device
* @buf: Raw data read from the chip
* @read_ecc: ECC bytes from the chip
* @calc_ecc: ECC calculated from the raw data
*
* Detect and correct bit errors for a data block.
*/
int nand_ecc_sw_bch_correct(struct nand_device *nand, unsigned char *buf,
unsigned char *read_ecc, unsigned char *calc_ecc)
{
struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
unsigned int step_size = nand->ecc.ctx.conf.step_size;
unsigned int *errloc = engine_conf->errloc;
int i, count;
count = bch_decode(engine_conf->bch, NULL, step_size, read_ecc,
calc_ecc, NULL, errloc);
if (count > 0) {
for (i = 0; i < count; i++) {
if (errloc[i] < (step_size * 8))
/* The error is in the data area: correct it */
buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
/* Otherwise the error is in the ECC area: nothing to do */
pr_debug("%s: corrected bitflip %u\n", __func__,
errloc[i]);
}
} else if (count < 0) {
pr_err("ECC unrecoverable error\n");
count = -EBADMSG;
}
return count;
}
EXPORT_SYMBOL(nand_ecc_sw_bch_correct);
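/*
 * Illustrative sketch (not part of the original engine): each errloc[]
 * entry is a bit index into the data area, and the repair above is one
 * XOR. Bit index 4242 means byte 4242 >> 3 = 530, bit 4242 & 7 = 2, so
 * the fix is buf[530] ^= BIT(2).
 */
static inline void __maybe_unused flip_errloc_bit_sketch(u8 *buf,
unsigned int errloc)
{
buf[errloc >> 3] ^= BIT(errloc & 7);
}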
/**
* nand_ecc_sw_bch_cleanup - Cleanup software BCH ECC resources
* @nand: NAND device
*/
static void nand_ecc_sw_bch_cleanup(struct nand_device *nand)
{
struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
bch_free(engine_conf->bch);
kfree(engine_conf->errloc);
kfree(engine_conf->eccmask);
}
/**
* nand_ecc_sw_bch_init - Initialize software BCH ECC engine
* @nand: NAND device
*
* Returns: 0 on success, a negative error code otherwise
*
* Initialize NAND BCH error correction. @nand.ecc parameters 'step_size' and
* 'bytes' are used to compute the following BCH parameters:
* m, the Galois field order
* t, the error correction capability
* 'bytes' should be equal to the number of bytes required to store m * t
* bits, where m is such that 2^m - 1 > step_size * 8.
*
* Example: to configure 4 bit correction per 512 bytes, you should pass
* step_size = 512 (thus, m = 13 is the smallest integer such that 2^m - 1 > 512 * 8)
* bytes = 7 (7 bytes are required to store m * t = 13 * 4 = 52 bits)
*/
static int nand_ecc_sw_bch_init(struct nand_device *nand)
{
struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
unsigned int eccsize = nand->ecc.ctx.conf.step_size;
unsigned int eccbytes = engine_conf->code_size;
unsigned int m, t, i;
unsigned char *erased_page;
int ret;
m = fls(1 + (8 * eccsize));
t = (eccbytes * 8) / m;
engine_conf->bch = bch_init(m, t, 0, false);
if (!engine_conf->bch)
return -EINVAL;
engine_conf->eccmask = kzalloc(eccbytes, GFP_KERNEL);
engine_conf->errloc = kmalloc_array(t, sizeof(*engine_conf->errloc),
GFP_KERNEL);
if (!engine_conf->eccmask || !engine_conf->errloc) {
ret = -ENOMEM;
goto cleanup;
}
/* Compute and store the inverted ECC of an erased step */
erased_page = kmalloc(eccsize, GFP_KERNEL);
if (!erased_page) {
ret = -ENOMEM;
goto cleanup;
}
memset(erased_page, 0xff, eccsize);
bch_encode(engine_conf->bch, erased_page, eccsize,
engine_conf->eccmask);
kfree(erased_page);
for (i = 0; i < eccbytes; i++)
engine_conf->eccmask[i] ^= 0xff;
/* Verify that the number of code bytes has the expected value */
if (engine_conf->bch->ecc_bytes != eccbytes) {
pr_err("Invalid number of ECC bytes: %u, expected: %u\n",
eccbytes, engine_conf->bch->ecc_bytes);
ret = -EINVAL;
goto cleanup;
}
/* Sanity checks */
if (8 * (eccsize + eccbytes) >= (1 << m)) {
pr_err("ECC step size is too large (%u)\n", eccsize);
ret = -EINVAL;
goto cleanup;
}
return 0;
cleanup:
nand_ecc_sw_bch_cleanup(nand);
return ret;
}
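/*
 * Worked example (illustrative, mirroring the kernel-doc above): for the
 * common 4 bit/512 byte configuration with code_size = 7, the maths above
 * resolve to:
 *
 *	m = fls(1 + 8 * 512) = fls(4097) = 13;	(2^13 - 1 = 8191 > 4096)
 *	t = (7 * 8) / 13 = 4;			(4 correctable bits per step)
 *
 * The 13 * 4 = 52 parity bits fit in the 7 ECC bytes, and the final
 * sanity check holds: 8 * (512 + 7) = 4152 < 2^13 = 8192.
 */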
int nand_ecc_sw_bch_init_ctx(struct nand_device *nand)
{
struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
struct mtd_info *mtd = nanddev_to_mtd(nand);
struct nand_ecc_sw_bch_conf *engine_conf;
unsigned int code_size = 0, nsteps;
int ret;
/* Only large page NAND chips may use BCH */
if (mtd->oobsize < 64) {
pr_err("BCH cannot be used with small page NAND chips\n");
return -EINVAL;
}
if (!mtd->ooblayout)
mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
conf->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
conf->algo = NAND_ECC_ALGO_BCH;
conf->step_size = nand->ecc.user_conf.step_size;
conf->strength = nand->ecc.user_conf.strength;
/*
* Board driver should supply ECC size and ECC strength
* values to select how many bits are correctable.
* Otherwise, default to 512 bytes for large page devices and 256 for
* small page devices.
*/
if (!conf->step_size) {
if (mtd->oobsize >= 64)
conf->step_size = 512;
else
conf->step_size = 256;
conf->strength = 4;
}
nsteps = mtd->writesize / conf->step_size;
/* Maximize */
if (nand->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) {
conf->step_size = 1024;
nsteps = mtd->writesize / conf->step_size;
/* Reserve 2 bytes for the BBM */
code_size = (mtd->oobsize - 2) / nsteps;
conf->strength = code_size * 8 / fls(8 * conf->step_size);
}
if (!code_size)
code_size = DIV_ROUND_UP(conf->strength *
fls(8 * conf->step_size), 8);
if (!conf->strength)
conf->strength = (code_size * 8) / fls(8 * conf->step_size);
if (!code_size && !conf->strength) {
pr_err("Missing ECC parameters\n");
return -EINVAL;
}
engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
if (!engine_conf)
return -ENOMEM;
ret = nand_ecc_init_req_tweaking(&engine_conf->req_ctx, nand);
if (ret)
goto free_engine_conf;
engine_conf->code_size = code_size;
engine_conf->calc_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
engine_conf->code_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
if (!engine_conf->calc_buf || !engine_conf->code_buf) {
ret = -ENOMEM;
goto free_bufs;
}
nand->ecc.ctx.priv = engine_conf;
nand->ecc.ctx.nsteps = nsteps;
nand->ecc.ctx.total = nsteps * code_size;
ret = nand_ecc_sw_bch_init(nand);
if (ret)
goto free_bufs;
/* Verify the layout validity */
if (mtd_ooblayout_count_eccbytes(mtd) !=
nand->ecc.ctx.nsteps * engine_conf->code_size) {
pr_err("Invalid ECC layout\n");
ret = -EINVAL;
goto cleanup_bch_ctx;
}
return 0;
cleanup_bch_ctx:
nand_ecc_sw_bch_cleanup(nand);
free_bufs:
nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx);
kfree(engine_conf->calc_buf);
kfree(engine_conf->code_buf);
free_engine_conf:
kfree(engine_conf);
return ret;
}
EXPORT_SYMBOL(nand_ecc_sw_bch_init_ctx);
void nand_ecc_sw_bch_cleanup_ctx(struct nand_device *nand)
{
struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
if (engine_conf) {
nand_ecc_sw_bch_cleanup(nand);
nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx);
kfree(engine_conf->calc_buf);
kfree(engine_conf->code_buf);
kfree(engine_conf);
}
}
EXPORT_SYMBOL(nand_ecc_sw_bch_cleanup_ctx);
static int nand_ecc_sw_bch_prepare_io_req(struct nand_device *nand,
struct nand_page_io_req *req)
{
struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
struct mtd_info *mtd = nanddev_to_mtd(nand);
int eccsize = nand->ecc.ctx.conf.step_size;
int eccbytes = engine_conf->code_size;
int eccsteps = nand->ecc.ctx.nsteps;
int total = nand->ecc.ctx.total;
u8 *ecccalc = engine_conf->calc_buf;
const u8 *data;
int i;
/* Nothing to do for a raw operation */
if (req->mode == MTD_OPS_RAW)
return 0;
/* This engine does not provide BBM/free OOB bytes protection */
if (!req->datalen)
return 0;
nand_ecc_tweak_req(&engine_conf->req_ctx, req);
/* No more preparation for page read */
if (req->type == NAND_PAGE_READ)
return 0;
/* Preparation for page write: derive the ECC bytes and place them */
for (i = 0, data = req->databuf.out;
eccsteps;
eccsteps--, i += eccbytes, data += eccsize)
nand_ecc_sw_bch_calculate(nand, data, &ecccalc[i]);
return mtd_ooblayout_set_eccbytes(mtd, ecccalc, (void *)req->oobbuf.out,
0, total);
}
static int nand_ecc_sw_bch_finish_io_req(struct nand_device *nand,
struct nand_page_io_req *req)
{
struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
struct mtd_info *mtd = nanddev_to_mtd(nand);
int eccsize = nand->ecc.ctx.conf.step_size;
int total = nand->ecc.ctx.total;
int eccbytes = engine_conf->code_size;
int eccsteps = nand->ecc.ctx.nsteps;
u8 *ecccalc = engine_conf->calc_buf;
u8 *ecccode = engine_conf->code_buf;
unsigned int max_bitflips = 0;
u8 *data = req->databuf.in;
int i, ret;
/* Nothing to do for a raw operation */
if (req->mode == MTD_OPS_RAW)
return 0;
/* This engine does not provide BBM/free OOB bytes protection */
if (!req->datalen)
return 0;
/* No more preparation for page write */
if (req->type == NAND_PAGE_WRITE) {
nand_ecc_restore_req(&engine_conf->req_ctx, req);
return 0;
}
/* Finish a page read: retrieve the (raw) ECC bytes */
ret = mtd_ooblayout_get_eccbytes(mtd, ecccode, req->oobbuf.in, 0,
total);
if (ret)
return ret;
/* Calculate the ECC bytes */
for (i = 0; eccsteps; eccsteps--, i += eccbytes, data += eccsize)
nand_ecc_sw_bch_calculate(nand, data, &ecccalc[i]);
/* Finish a page read: compare and correct */
for (eccsteps = nand->ecc.ctx.nsteps, i = 0, data = req->databuf.in;
eccsteps;
eccsteps--, i += eccbytes, data += eccsize) {
int stat = nand_ecc_sw_bch_correct(nand, data,
&ecccode[i],
&ecccalc[i]);
if (stat < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += stat;
max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
}
nand_ecc_restore_req(&engine_conf->req_ctx, req);
return max_bitflips;
}
static struct nand_ecc_engine_ops nand_ecc_sw_bch_engine_ops = {
.init_ctx = nand_ecc_sw_bch_init_ctx,
.cleanup_ctx = nand_ecc_sw_bch_cleanup_ctx,
.prepare_io_req = nand_ecc_sw_bch_prepare_io_req,
.finish_io_req = nand_ecc_sw_bch_finish_io_req,
};
static struct nand_ecc_engine nand_ecc_sw_bch_engine = {
.ops = &nand_ecc_sw_bch_engine_ops,
};
struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void)
{
return &nand_ecc_sw_bch_engine;
}
EXPORT_SYMBOL(nand_ecc_sw_bch_get_engine);
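/*
 * Usage sketch (illustrative; the generic NAND core performs these steps
 * for you when the soft BCH engine is selected):
 *
 *	nand->ecc.engine = nand_ecc_sw_bch_get_engine();
 *	ret = nand_ecc_init_ctx(nand);	(dispatches to nand_ecc_sw_bch_init_ctx())
 *	...
 *	nand_ecc_cleanup_ctx(nand);
 */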
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ivan Djelic <[email protected]>");
MODULE_DESCRIPTION("NAND software BCH ECC support");
| linux-master | drivers/mtd/nand/ecc-sw-bch.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Generic Error-Correcting Code (ECC) engine
*
* Copyright (C) 2019 Macronix
* Author:
* Miquèl RAYNAL <[email protected]>
*
*
* This file describes the abstraction of any NAND ECC engine. It has been
* designed to fit most cases, including parallel NANDs and SPI-NANDs.
*
* There are three main situations where instantiating this ECC engine makes
* sense:
* - external: The ECC engine is outside the NAND pipeline, typically this
* is a software ECC engine, or a hardware engine that is
* outside the NAND controller pipeline.
* - pipelined: The ECC engine is inside the NAND pipeline, i.e. on the
* controller's side. This is the case of most of the raw NAND
* controllers. In the pipeline case, the ECC bytes are
* generated/data corrected on the fly when a page is
* written/read.
* - ondie: The ECC engine is inside the NAND pipeline, on the chip's side.
* Some NAND chips can correct the data themselves.
*
* Besides the initial setup and final cleanups, the interfaces are rather
* simple:
* - prepare: Prepare an I/O request. Enable/disable the ECC engine based on
* the I/O request type. In case of software correction or external
* engine, this step may involve deriving the ECC bytes and placing
* them in the OOB area before a write.
* - finish: Finish an I/O request. Correct the data in case of a read
* request and report the number of corrected bits/uncorrectable
* errors. Most likely empty for write operations, unless you have
* hardware specific stuff to do, like shutting down the engine to
* save power.
*
* The I/O request should be enclosed in a prepare()/finish() pair of calls
* and will behave differently depending on the requested I/O type:
* - raw: Correction disabled
* - ecc: Correction enabled
*
* The request direction is impacting the logic as well:
* - read: Load data from the NAND chip
* - write: Store data in the NAND chip
*
* Mixing all these combinations together gives the following behavior.
* Those are just examples, drivers are free to add custom steps in their
* prepare/finish hook.
*
* [external ECC engine]
* - external + prepare + raw + read: do nothing
* - external + finish + raw + read: do nothing
* - external + prepare + raw + write: do nothing
* - external + finish + raw + write: do nothing
* - external + prepare + ecc + read: do nothing
* - external + finish + ecc + read: calculate expected ECC bytes, extract
* ECC bytes from OOB buffer, correct
* and report any bitflip/error
* - external + prepare + ecc + write: calculate ECC bytes and store them at
* the right place in the OOB buffer based
* on the OOB layout
* - external + finish + ecc + write: do nothing
*
* [pipelined ECC engine]
* - pipelined + prepare + raw + read: disable the controller's ECC engine if
* activated
* - pipelined + finish + raw + read: do nothing
* - pipelined + prepare + raw + write: disable the controller's ECC engine if
* activated
* - pipelined + finish + raw + write: do nothing
* - pipelined + prepare + ecc + read: enable the controller's ECC engine if
* deactivated
* - pipelined + finish + ecc + read: check the status, report any
* error/bitflip
* - pipelined + prepare + ecc + write: enable the controller's ECC engine if
* deactivated
* - pipelined + finish + ecc + write: do nothing
*
* [ondie ECC engine]
* - ondie + prepare + raw + read: send commands to disable the on-chip ECC
* engine if activated
* - ondie + finish + raw + read: do nothing
* - ondie + prepare + raw + write: send commands to disable the on-chip ECC
* engine if activated
* - ondie + finish + raw + write: do nothing
* - ondie + prepare + ecc + read: send commands to enable the on-chip ECC
* engine if deactivated
* - ondie + finish + ecc + read: send commands to check the status, report
* any error/bitflip
* - ondie + prepare + ecc + write: send commands to enable the on-chip ECC
* engine if deactivated
* - ondie + finish + ecc + write: do nothing
*/
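/*
 * Minimal call-flow sketch for one page I/O, whatever the engine
 * placement (illustrative only):
 *
 *	ret = nand_ecc_prepare_io_req(nand, &req);
 *	if (ret)
 *		return ret;
 *	... perform the actual page transfer ...
 *	ret = nand_ecc_finish_io_req(nand, &req);
 */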
#include <linux/module.h>
#include <linux/mtd/nand.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>
static LIST_HEAD(on_host_hw_engines);
static DEFINE_MUTEX(on_host_hw_engines_mutex);
/**
* nand_ecc_init_ctx - Init the ECC engine context
* @nand: the NAND device
*
* On success, the caller is responsible for calling @nand_ecc_cleanup_ctx().
*/
int nand_ecc_init_ctx(struct nand_device *nand)
{
if (!nand->ecc.engine || !nand->ecc.engine->ops->init_ctx)
return 0;
return nand->ecc.engine->ops->init_ctx(nand);
}
EXPORT_SYMBOL(nand_ecc_init_ctx);
/**
* nand_ecc_cleanup_ctx - Cleanup the ECC engine context
* @nand: the NAND device
*/
void nand_ecc_cleanup_ctx(struct nand_device *nand)
{
if (nand->ecc.engine && nand->ecc.engine->ops->cleanup_ctx)
nand->ecc.engine->ops->cleanup_ctx(nand);
}
EXPORT_SYMBOL(nand_ecc_cleanup_ctx);
/**
* nand_ecc_prepare_io_req - Prepare an I/O request
* @nand: the NAND device
* @req: the I/O request
*/
int nand_ecc_prepare_io_req(struct nand_device *nand,
struct nand_page_io_req *req)
{
if (!nand->ecc.engine || !nand->ecc.engine->ops->prepare_io_req)
return 0;
return nand->ecc.engine->ops->prepare_io_req(nand, req);
}
EXPORT_SYMBOL(nand_ecc_prepare_io_req);
/**
* nand_ecc_finish_io_req - Finish an I/O request
* @nand: the NAND device
* @req: the I/O request
*/
int nand_ecc_finish_io_req(struct nand_device *nand,
struct nand_page_io_req *req)
{
if (!nand->ecc.engine || !nand->ecc.engine->ops->finish_io_req)
return 0;
return nand->ecc.engine->ops->finish_io_req(nand, req);
}
EXPORT_SYMBOL(nand_ecc_finish_io_req);
/* Define default OOB placement schemes for large and small page devices */
static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
unsigned int total_ecc_bytes = nand->ecc.ctx.total;
if (section > 1)
return -ERANGE;
if (!section) {
oobregion->offset = 0;
if (mtd->oobsize == 16)
oobregion->length = 4;
else
oobregion->length = 3;
} else {
if (mtd->oobsize == 8)
return -ERANGE;
oobregion->offset = 6;
oobregion->length = total_ecc_bytes - 4;
}
return 0;
}
static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section > 1)
return -ERANGE;
if (mtd->oobsize == 16) {
if (section)
return -ERANGE;
oobregion->length = 8;
oobregion->offset = 8;
} else {
oobregion->length = 2;
if (!section)
oobregion->offset = 3;
else
oobregion->offset = 6;
}
return 0;
}
static const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
.ecc = nand_ooblayout_ecc_sp,
.free = nand_ooblayout_free_sp,
};
const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void)
{
return &nand_ooblayout_sp_ops;
}
EXPORT_SYMBOL_GPL(nand_get_small_page_ooblayout);
static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
unsigned int total_ecc_bytes = nand->ecc.ctx.total;
if (section || !total_ecc_bytes)
return -ERANGE;
oobregion->length = total_ecc_bytes;
oobregion->offset = mtd->oobsize - oobregion->length;
return 0;
}
static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
unsigned int total_ecc_bytes = nand->ecc.ctx.total;
if (section)
return -ERANGE;
oobregion->length = mtd->oobsize - total_ecc_bytes - 2;
oobregion->offset = 2;
return 0;
}
static const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
.ecc = nand_ooblayout_ecc_lp,
.free = nand_ooblayout_free_lp,
};
const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void)
{
return &nand_ooblayout_lp_ops;
}
EXPORT_SYMBOL_GPL(nand_get_large_page_ooblayout);
/*
* Support the old "large page" layout used for 1-bit Hamming ECC where the
* ECC bytes are placed at a fixed offset.
*/
static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
unsigned int total_ecc_bytes = nand->ecc.ctx.total;
if (section)
return -ERANGE;
switch (mtd->oobsize) {
case 64:
oobregion->offset = 40;
break;
case 128:
oobregion->offset = 80;
break;
default:
return -EINVAL;
}
oobregion->length = total_ecc_bytes;
if (oobregion->offset + oobregion->length > mtd->oobsize)
return -ERANGE;
return 0;
}
static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
unsigned int total_ecc_bytes = nand->ecc.ctx.total;
int ecc_offset = 0;
if (section < 0 || section > 1)
return -ERANGE;
switch (mtd->oobsize) {
case 64:
ecc_offset = 40;
break;
case 128:
ecc_offset = 80;
break;
default:
return -EINVAL;
}
if (section == 0) {
oobregion->offset = 2;
oobregion->length = ecc_offset - 2;
} else {
oobregion->offset = ecc_offset + total_ecc_bytes;
oobregion->length = mtd->oobsize - oobregion->offset;
}
return 0;
}
static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
.ecc = nand_ooblayout_ecc_lp_hamming,
.free = nand_ooblayout_free_lp_hamming,
};
const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void)
{
return &nand_ooblayout_lp_hamming_ops;
}
EXPORT_SYMBOL_GPL(nand_get_large_page_hamming_ooblayout);
static enum nand_ecc_engine_type
of_get_nand_ecc_engine_type(struct device_node *np)
{
struct device_node *eng_np;
if (of_property_read_bool(np, "nand-no-ecc-engine"))
return NAND_ECC_ENGINE_TYPE_NONE;
if (of_property_read_bool(np, "nand-use-soft-ecc-engine"))
return NAND_ECC_ENGINE_TYPE_SOFT;
eng_np = of_parse_phandle(np, "nand-ecc-engine", 0);
of_node_put(eng_np);
if (eng_np) {
if (eng_np == np)
return NAND_ECC_ENGINE_TYPE_ON_DIE;
else
return NAND_ECC_ENGINE_TYPE_ON_HOST;
}
return NAND_ECC_ENGINE_TYPE_INVALID;
}
static const char * const nand_ecc_placement[] = {
[NAND_ECC_PLACEMENT_OOB] = "oob",
[NAND_ECC_PLACEMENT_INTERLEAVED] = "interleaved",
};
static enum nand_ecc_placement of_get_nand_ecc_placement(struct device_node *np)
{
enum nand_ecc_placement placement;
const char *pm;
int err;
err = of_property_read_string(np, "nand-ecc-placement", &pm);
if (!err) {
for (placement = NAND_ECC_PLACEMENT_OOB;
placement < ARRAY_SIZE(nand_ecc_placement); placement++) {
if (!strcasecmp(pm, nand_ecc_placement[placement]))
return placement;
}
}
return NAND_ECC_PLACEMENT_UNKNOWN;
}
static const char * const nand_ecc_algos[] = {
[NAND_ECC_ALGO_HAMMING] = "hamming",
[NAND_ECC_ALGO_BCH] = "bch",
[NAND_ECC_ALGO_RS] = "rs",
};
static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np)
{
enum nand_ecc_algo ecc_algo;
const char *pm;
int err;
err = of_property_read_string(np, "nand-ecc-algo", &pm);
if (!err) {
for (ecc_algo = NAND_ECC_ALGO_HAMMING;
ecc_algo < ARRAY_SIZE(nand_ecc_algos);
ecc_algo++) {
if (!strcasecmp(pm, nand_ecc_algos[ecc_algo]))
return ecc_algo;
}
}
return NAND_ECC_ALGO_UNKNOWN;
}
static int of_get_nand_ecc_step_size(struct device_node *np)
{
int ret;
u32 val;
ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
return ret ? ret : val;
}
static int of_get_nand_ecc_strength(struct device_node *np)
{
int ret;
u32 val;
ret = of_property_read_u32(np, "nand-ecc-strength", &val);
return ret ? ret : val;
}
void of_get_nand_ecc_user_config(struct nand_device *nand)
{
struct device_node *dn = nanddev_get_of_node(nand);
int strength, size;
nand->ecc.user_conf.engine_type = of_get_nand_ecc_engine_type(dn);
nand->ecc.user_conf.algo = of_get_nand_ecc_algo(dn);
nand->ecc.user_conf.placement = of_get_nand_ecc_placement(dn);
strength = of_get_nand_ecc_strength(dn);
if (strength >= 0)
nand->ecc.user_conf.strength = strength;
size = of_get_nand_ecc_step_size(dn);
if (size >= 0)
nand->ecc.user_conf.step_size = size;
if (of_property_read_bool(dn, "nand-ecc-maximize"))
nand->ecc.user_conf.flags |= NAND_ECC_MAXIMIZE_STRENGTH;
}
EXPORT_SYMBOL(of_get_nand_ecc_user_config);
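/*
 * Example device tree fragment matched by the parsers above (values are
 * illustrative only):
 *
 *	nand@0 {
 *		nand-ecc-algo = "bch";
 *		nand-ecc-step-size = <512>;
 *		nand-ecc-strength = <8>;
 *	};
 */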
/**
* nand_ecc_is_strong_enough - Check if the chip configuration meets the
* datasheet requirements.
*
* @nand: Device to check
*
* If our configuration corrects A bits per B bytes and the minimum
* required correction level is X bits per Y bytes, then we must ensure
* both of the following are true:
*
* (1) A / B >= X / Y
* (2) A >= X
*
* Requirement (1) ensures we can correct for the required bitflip density.
* Requirement (2) ensures we can correct even when all bitflips are clumped
* in the same sector.
*/
bool nand_ecc_is_strong_enough(struct nand_device *nand)
{
const struct nand_ecc_props *reqs = nanddev_get_ecc_requirements(nand);
const struct nand_ecc_props *conf = nanddev_get_ecc_conf(nand);
struct mtd_info *mtd = nanddev_to_mtd(nand);
int corr, ds_corr;
if (conf->step_size == 0 || reqs->step_size == 0)
/* Not enough information */
return true;
/*
* We get the number of corrected bits per page to compare
* the correction density.
*/
corr = (mtd->writesize * conf->strength) / conf->step_size;
ds_corr = (mtd->writesize * reqs->strength) / reqs->step_size;
return corr >= ds_corr && conf->strength >= reqs->strength;
}
EXPORT_SYMBOL(nand_ecc_is_strong_enough);
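/*
 * Numeric example (illustrative): on a 4096 byte page, an 8 bit/512 byte
 * configuration checked against a 4 bit/512 byte requirement gives
 * corr = 64 and ds_corr = 32, and 8 >= 4, so it is strong enough. A
 * 4 bit/256 byte configuration (corr = 64) would still be rejected
 * against an 8 bit/512 byte requirement (ds_corr = 64) because rule (2)
 * fails: strength 4 < 8.
 */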
/* ECC engine driver internal helpers */
int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
struct nand_device *nand)
{
unsigned int total_buffer_size;
ctx->nand = nand;
/* Let the user decide the exact length of each buffer */
if (!ctx->page_buffer_size)
ctx->page_buffer_size = nanddev_page_size(nand);
if (!ctx->oob_buffer_size)
ctx->oob_buffer_size = nanddev_per_page_oobsize(nand);
total_buffer_size = ctx->page_buffer_size + ctx->oob_buffer_size;
ctx->spare_databuf = kzalloc(total_buffer_size, GFP_KERNEL);
if (!ctx->spare_databuf)
return -ENOMEM;
ctx->spare_oobbuf = ctx->spare_databuf + ctx->page_buffer_size;
return 0;
}
EXPORT_SYMBOL_GPL(nand_ecc_init_req_tweaking);
void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx)
{
kfree(ctx->spare_databuf);
}
EXPORT_SYMBOL_GPL(nand_ecc_cleanup_req_tweaking);
/*
* Ensure the data and OOB areas are fully read/written, otherwise the
* correction might not work as expected.
*/
void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
struct nand_page_io_req *req)
{
struct nand_device *nand = ctx->nand;
struct nand_page_io_req *orig, *tweak;
/* Save the original request */
ctx->orig_req = *req;
ctx->bounce_data = false;
ctx->bounce_oob = false;
orig = &ctx->orig_req;
tweak = req;
/* Ensure the request covers the entire page */
if (orig->datalen < nanddev_page_size(nand)) {
ctx->bounce_data = true;
tweak->dataoffs = 0;
tweak->datalen = nanddev_page_size(nand);
tweak->databuf.in = ctx->spare_databuf;
memset(tweak->databuf.in, 0xFF, ctx->page_buffer_size);
}
if (orig->ooblen < nanddev_per_page_oobsize(nand)) {
ctx->bounce_oob = true;
tweak->ooboffs = 0;
tweak->ooblen = nanddev_per_page_oobsize(nand);
tweak->oobbuf.in = ctx->spare_oobbuf;
memset(tweak->oobbuf.in, 0xFF, ctx->oob_buffer_size);
}
/* Copy the data that must be written in the bounce buffers, if needed */
if (orig->type == NAND_PAGE_WRITE) {
if (ctx->bounce_data)
memcpy((void *)tweak->databuf.out + orig->dataoffs,
orig->databuf.out, orig->datalen);
if (ctx->bounce_oob)
memcpy((void *)tweak->oobbuf.out + orig->ooboffs,
orig->oobbuf.out, orig->ooblen);
}
}
EXPORT_SYMBOL_GPL(nand_ecc_tweak_req);
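/*
 * Example (illustrative): a 256 byte write at page offset 512 on a
 * 2048+64 byte device gets bounced: the spare buffers are reset to 0xff,
 * the 256 payload bytes are copied at offset 512 within the data bounce
 * buffer, and the tweaked request then spans the full 2048+64 bytes so
 * the ECC engine always sees a complete page.
 */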
void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
struct nand_page_io_req *req)
{
struct nand_page_io_req *orig, *tweak;
orig = &ctx->orig_req;
tweak = req;
/* Restore the data read from the bounce buffers, if needed */
if (orig->type == NAND_PAGE_READ) {
if (ctx->bounce_data)
memcpy(orig->databuf.in,
tweak->databuf.in + orig->dataoffs,
orig->datalen);
if (ctx->bounce_oob)
memcpy(orig->oobbuf.in,
tweak->oobbuf.in + orig->ooboffs,
orig->ooblen);
}
/* Ensure the original request is restored */
*req = *orig;
}
EXPORT_SYMBOL_GPL(nand_ecc_restore_req);
struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand)
{
unsigned int algo = nand->ecc.user_conf.algo;
if (algo == NAND_ECC_ALGO_UNKNOWN)
algo = nand->ecc.defaults.algo;
switch (algo) {
case NAND_ECC_ALGO_HAMMING:
return nand_ecc_sw_hamming_get_engine();
case NAND_ECC_ALGO_BCH:
return nand_ecc_sw_bch_get_engine();
default:
break;
}
return NULL;
}
EXPORT_SYMBOL(nand_ecc_get_sw_engine);
struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand)
{
return nand->ecc.ondie_engine;
}
EXPORT_SYMBOL(nand_ecc_get_on_die_hw_engine);
int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine)
{
struct nand_ecc_engine *item;
if (!engine)
return -EINVAL;
/* Prevent multiple registrations of one engine */
list_for_each_entry(item, &on_host_hw_engines, node)
if (item == engine)
return 0;
mutex_lock(&on_host_hw_engines_mutex);
list_add_tail(&engine->node, &on_host_hw_engines);
mutex_unlock(&on_host_hw_engines_mutex);
return 0;
}
EXPORT_SYMBOL(nand_ecc_register_on_host_hw_engine);
int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine)
{
if (!engine)
return -EINVAL;
mutex_lock(&on_host_hw_engines_mutex);
list_del(&engine->node);
mutex_unlock(&on_host_hw_engines_mutex);
return 0;
}
EXPORT_SYMBOL(nand_ecc_unregister_on_host_hw_engine);
static struct nand_ecc_engine *nand_ecc_match_on_host_hw_engine(struct device *dev)
{
struct nand_ecc_engine *item;
list_for_each_entry(item, &on_host_hw_engines, node)
if (item->dev == dev)
return item;
return NULL;
}
struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand)
{
struct nand_ecc_engine *engine = NULL;
struct device *dev = &nand->mtd.dev;
struct platform_device *pdev;
struct device_node *np;
if (list_empty(&on_host_hw_engines))
return NULL;
/* Check for an explicit nand-ecc-engine property */
np = of_parse_phandle(dev->of_node, "nand-ecc-engine", 0);
if (np) {
pdev = of_find_device_by_node(np);
if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
engine = nand_ecc_match_on_host_hw_engine(&pdev->dev);
platform_device_put(pdev);
of_node_put(np);
if (!engine)
return ERR_PTR(-EPROBE_DEFER);
}
if (engine)
get_device(engine->dev);
return engine;
}
EXPORT_SYMBOL(nand_ecc_get_on_host_hw_engine);
void nand_ecc_put_on_host_hw_engine(struct nand_device *nand)
{
put_device(nand->ecc.engine->dev);
}
EXPORT_SYMBOL(nand_ecc_put_on_host_hw_engine);
/*
* In the case of a pipelined engine, the device registering the ECC
* engine is not necessarily the ECC engine itself but may be a host controller.
* It is then useful to provide a helper to retrieve the right device object
* which actually represents the ECC engine.
*/
struct device *nand_ecc_get_engine_dev(struct device *host)
{
struct platform_device *ecc_pdev;
struct device_node *np;
/*
* If the device node contains this property, it means we need to follow
* it in order to get the right ECC engine device we are looking for.
*/
np = of_parse_phandle(host->of_node, "nand-ecc-engine", 0);
if (!np)
return host;
ecc_pdev = of_find_device_by_node(np);
if (!ecc_pdev) {
of_node_put(np);
return NULL;
}
platform_device_put(ecc_pdev);
of_node_put(np);
return &ecc_pdev->dev;
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <[email protected]>");
MODULE_DESCRIPTION("Generic ECC engine");
| linux-master | drivers/mtd/nand/ecc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Macronix external hardware ECC engine for NAND devices, also
* called DPE for Data Processing Engine.
*
* Copyright © 2019 Macronix
* Author: Miquel Raynal <[email protected]>
*/
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand-ecc-mxic.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
/* DPE Configuration */
#define DP_CONFIG 0x00
#define ECC_EN BIT(0)
#define ECC_TYP(idx) (((idx) << 3) & GENMASK(6, 3))
/* DPE Interrupt Status */
#define INTRPT_STS 0x04
#define TRANS_CMPLT BIT(0)
#define SDMA_MAIN BIT(1)
#define SDMA_SPARE BIT(2)
#define ECC_ERR BIT(3)
#define TO_SPARE BIT(4)
#define TO_MAIN BIT(5)
/* DPE Interrupt Status Enable */
#define INTRPT_STS_EN 0x08
/* DPE Interrupt Signal Enable */
#define INTRPT_SIG_EN 0x0C
/* Host Controller Configuration */
#define HC_CONFIG 0x10
#define DEV2MEM 0 /* TRANS_TYP_DMA in the spec */
#define MEM2MEM BIT(4) /* TRANS_TYP_IO in the spec */
#define MAPPING BIT(5) /* TRANS_TYP_MAPPING in the spec */
#define ECC_PACKED 0 /* LAYOUT_TYP_INTEGRATED in the spec */
#define ECC_INTERLEAVED BIT(2) /* LAYOUT_TYP_DISTRIBUTED in the spec */
#define BURST_TYP_FIXED 0
#define BURST_TYP_INCREASING BIT(0)
/* Host Controller Slave Address */
#define HC_SLV_ADDR 0x14
/* ECC Chunk Size */
#define CHUNK_SIZE 0x20
/* Main Data Size */
#define MAIN_SIZE 0x24
/* Spare Data Size */
#define SPARE_SIZE 0x28
#define META_SZ(reg) ((reg) & GENMASK(7, 0))
#define PARITY_SZ(reg) (((reg) & GENMASK(15, 8)) >> 8)
#define RSV_SZ(reg) (((reg) & GENMASK(23, 16)) >> 16)
#define SPARE_SZ(reg) ((reg) >> 24)
/* ECC Chunk Count */
#define CHUNK_CNT 0x30
/* SDMA Control */
#define SDMA_CTRL 0x40
#define WRITE_NAND 0
#define READ_NAND BIT(1)
#define CONT_NAND BIT(29)
#define CONT_SYSM BIT(30) /* Continue System Memory? */
#define SDMA_STRT BIT(31)
/* SDMA Address of Main Data */
#define SDMA_MAIN_ADDR 0x44
/* SDMA Address of Spare Data */
#define SDMA_SPARE_ADDR 0x48
/* DPE Version Number */
#define DP_VER 0xD0
#define DP_VER_OFFSET 16
/* Status bytes between each chunk of spare data */
#define STAT_BYTES 4
#define NO_ERR 0x00
#define MAX_CORR_ERR 0x28
#define UNCORR_ERR 0xFE
#define ERASED_CHUNK 0xFF
struct mxic_ecc_engine {
struct device *dev;
void __iomem *regs;
int irq;
struct completion complete;
struct nand_ecc_engine external_engine;
struct nand_ecc_engine pipelined_engine;
struct mutex lock;
};
struct mxic_ecc_ctx {
/* ECC machinery */
unsigned int data_step_sz;
unsigned int oob_step_sz;
unsigned int parity_sz;
unsigned int meta_sz;
u8 *status;
int steps;
/* DMA boilerplate */
struct nand_ecc_req_tweak_ctx req_ctx;
u8 *oobwithstat;
struct scatterlist sg[2];
struct nand_page_io_req *req;
unsigned int pageoffs;
};
static struct mxic_ecc_engine *ext_ecc_eng_to_mxic(struct nand_ecc_engine *eng)
{
return container_of(eng, struct mxic_ecc_engine, external_engine);
}
static struct mxic_ecc_engine *pip_ecc_eng_to_mxic(struct nand_ecc_engine *eng)
{
return container_of(eng, struct mxic_ecc_engine, pipelined_engine);
}
static struct mxic_ecc_engine *nand_to_mxic(struct nand_device *nand)
{
struct nand_ecc_engine *eng = nand->ecc.engine;
if (eng->integration == NAND_ECC_ENGINE_INTEGRATION_EXTERNAL)
return ext_ecc_eng_to_mxic(eng);
else
return pip_ecc_eng_to_mxic(eng);
}
static int mxic_ecc_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
if (section < 0 || section >= ctx->steps)
return -ERANGE;
oobregion->offset = (section * ctx->oob_step_sz) + ctx->meta_sz;
oobregion->length = ctx->parity_sz;
return 0;
}
static int mxic_ecc_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
if (section < 0 || section >= ctx->steps)
return -ERANGE;
if (!section) {
oobregion->offset = 2;
oobregion->length = ctx->meta_sz - 2;
} else {
oobregion->offset = section * ctx->oob_step_sz;
oobregion->length = ctx->meta_sz;
}
return 0;
}
static const struct mtd_ooblayout_ops mxic_ecc_ooblayout_ops = {
.ecc = mxic_ecc_ooblayout_ecc,
.free = mxic_ecc_ooblayout_free,
};
static void mxic_ecc_disable_engine(struct mxic_ecc_engine *mxic)
{
u32 reg;
reg = readl(mxic->regs + DP_CONFIG);
reg &= ~ECC_EN;
writel(reg, mxic->regs + DP_CONFIG);
}
static void mxic_ecc_enable_engine(struct mxic_ecc_engine *mxic)
{
u32 reg;
reg = readl(mxic->regs + DP_CONFIG);
reg |= ECC_EN;
writel(reg, mxic->regs + DP_CONFIG);
}
static void mxic_ecc_disable_int(struct mxic_ecc_engine *mxic)
{
writel(0, mxic->regs + INTRPT_SIG_EN);
}
static void mxic_ecc_enable_int(struct mxic_ecc_engine *mxic)
{
writel(TRANS_CMPLT, mxic->regs + INTRPT_SIG_EN);
}
static irqreturn_t mxic_ecc_isr(int irq, void *dev_id)
{
struct mxic_ecc_engine *mxic = dev_id;
u32 sts;
sts = readl(mxic->regs + INTRPT_STS);
if (!sts)
return IRQ_NONE;
if (sts & TRANS_CMPLT)
complete(&mxic->complete);
writel(sts, mxic->regs + INTRPT_STS);
return IRQ_HANDLED;
}
static int mxic_ecc_init_ctx(struct nand_device *nand, struct device *dev)
{
struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
struct nand_ecc_props *reqs = &nand->ecc.requirements;
struct nand_ecc_props *user = &nand->ecc.user_conf;
struct mtd_info *mtd = nanddev_to_mtd(nand);
int step_size = 0, strength = 0, desired_correction = 0, steps, idx;
static const int possible_strength[] = {4, 8, 40, 48};
static const int spare_size[] = {32, 32, 96, 96};
struct mxic_ecc_ctx *ctx;
u32 spare_reg;
int ret;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
nand->ecc.ctx.priv = ctx;
/* Only large page NAND chips may use BCH */
if (mtd->oobsize < 64) {
pr_err("BCH cannot be used with small page NAND chips\n");
return -EINVAL;
}
mtd_set_ooblayout(mtd, &mxic_ecc_ooblayout_ops);
/* Enable all status bits */
writel(TRANS_CMPLT | SDMA_MAIN | SDMA_SPARE | ECC_ERR |
TO_SPARE | TO_MAIN, mxic->regs + INTRPT_STS_EN);
/* Configure the correction depending on the NAND device topology */
if (user->step_size && user->strength) {
step_size = user->step_size;
strength = user->strength;
} else if (reqs->step_size && reqs->strength) {
step_size = reqs->step_size;
strength = reqs->strength;
}
if (step_size && strength) {
steps = mtd->writesize / step_size;
desired_correction = steps * strength;
}
/* Step size is fixed to 1kiB, strength may vary (4 possible values) */
conf->step_size = SZ_1K;
steps = mtd->writesize / conf->step_size;
ctx->status = devm_kzalloc(dev, steps * sizeof(u8), GFP_KERNEL);
if (!ctx->status)
return -ENOMEM;
if (desired_correction) {
strength = desired_correction / steps;
for (idx = 0; idx < ARRAY_SIZE(possible_strength); idx++)
if (possible_strength[idx] >= strength)
break;
idx = min_t(unsigned int, idx,
ARRAY_SIZE(possible_strength) - 1);
} else {
/* Missing data, maximize the correction */
idx = ARRAY_SIZE(possible_strength) - 1;
}
/* Tune the selected strength until it fits in the OOB area */
for (; idx >= 0; idx--) {
if (spare_size[idx] * steps <= mtd->oobsize)
break;
}
/* This engine cannot be used with this NAND device */
if (idx < 0)
return -EINVAL;
/* Configure the engine for the desired strength */
writel(ECC_TYP(idx), mxic->regs + DP_CONFIG);
conf->strength = possible_strength[idx];
spare_reg = readl(mxic->regs + SPARE_SIZE);
ctx->steps = steps;
ctx->data_step_sz = mtd->writesize / steps;
ctx->oob_step_sz = mtd->oobsize / steps;
ctx->parity_sz = PARITY_SZ(spare_reg);
ctx->meta_sz = META_SZ(spare_reg);
/* Ensure buffers will contain enough bytes to store the STAT_BYTES */
ctx->req_ctx.oob_buffer_size = nanddev_per_page_oobsize(nand) +
(ctx->steps * STAT_BYTES);
ret = nand_ecc_init_req_tweaking(&ctx->req_ctx, nand);
if (ret)
return ret;
ctx->oobwithstat = kmalloc(mtd->oobsize + (ctx->steps * STAT_BYTES),
GFP_KERNEL);
if (!ctx->oobwithstat) {
ret = -ENOMEM;
goto cleanup_req_tweak;
}
sg_init_table(ctx->sg, 2);
/* Configuration dump and sanity checks */
dev_err(dev, "DPE version number: %d\n",
readl(mxic->regs + DP_VER) >> DP_VER_OFFSET);
dev_err(dev, "Chunk size: %d\n", readl(mxic->regs + CHUNK_SIZE));
dev_err(dev, "Main size: %d\n", readl(mxic->regs + MAIN_SIZE));
dev_err(dev, "Spare size: %d\n", SPARE_SZ(spare_reg));
dev_err(dev, "Rsv size: %ld\n", RSV_SZ(spare_reg));
dev_err(dev, "Parity size: %d\n", ctx->parity_sz);
dev_err(dev, "Meta size: %d\n", ctx->meta_sz);
if ((ctx->meta_sz + ctx->parity_sz + RSV_SZ(spare_reg)) !=
SPARE_SZ(spare_reg)) {
dev_err(dev, "Wrong OOB configuration: %d + %d + %ld != %d\n",
ctx->meta_sz, ctx->parity_sz, RSV_SZ(spare_reg),
SPARE_SZ(spare_reg));
ret = -EINVAL;
goto free_oobwithstat;
}
if (ctx->oob_step_sz != SPARE_SZ(spare_reg)) {
dev_err(dev, "Wrong OOB configuration: %d != %d\n",
ctx->oob_step_sz, SPARE_SZ(spare_reg));
ret = -EINVAL;
goto free_oobwithstat;
}
return 0;
free_oobwithstat:
kfree(ctx->oobwithstat);
cleanup_req_tweak:
nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
return ret;
}
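/*
 * Selection example (illustrative): a chip requiring 8 bits/512 bytes on
 * a 4096+256 byte device yields desired_correction = 8 * 8 = 64 bits.
 * With the fixed 1 kiB step there are 4 steps, hence strength = 16,
 * which first matches the 40 bit entry; but 96 * 4 = 384 > 256 OOB
 * bytes, so the tuning loop falls back to the 8 bit entry
 * (32 * 4 = 128 <= 256) and conf->strength ends up being 8.
 */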
static int mxic_ecc_init_ctx_external(struct nand_device *nand)
{
struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
struct device *dev = nand->ecc.engine->dev;
int ret;
dev_info(dev, "Macronix ECC engine in external mode\n");
ret = mxic_ecc_init_ctx(nand, dev);
if (ret)
return ret;
/* Trigger each step manually */
writel(1, mxic->regs + CHUNK_CNT);
writel(BURST_TYP_INCREASING | ECC_PACKED | MEM2MEM,
mxic->regs + HC_CONFIG);
return 0;
}
static int mxic_ecc_init_ctx_pipelined(struct nand_device *nand)
{
struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
struct mxic_ecc_ctx *ctx;
struct device *dev;
int ret;
dev = nand_ecc_get_engine_dev(nand->ecc.engine->dev);
if (!dev)
return -EINVAL;
dev_info(dev, "Macronix ECC engine in pipelined/mapping mode\n");
ret = mxic_ecc_init_ctx(nand, dev);
if (ret)
return ret;
ctx = nand_to_ecc_ctx(nand);
/* All steps should be handled in one go directly by the internal DMA */
writel(ctx->steps, mxic->regs + CHUNK_CNT);
/*
* Interleaved ECC scheme cannot be used otherwise factory bad block
* markers would be lost. A packed layout is mandatory.
*/
writel(BURST_TYP_INCREASING | ECC_PACKED | MAPPING,
mxic->regs + HC_CONFIG);
return 0;
}
static void mxic_ecc_cleanup_ctx(struct nand_device *nand)
{
struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
if (ctx) {
nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
kfree(ctx->oobwithstat);
}
}
static int mxic_ecc_data_xfer_wait_for_completion(struct mxic_ecc_engine *mxic)
{
u32 val;
int ret;
if (mxic->irq) {
reinit_completion(&mxic->complete);
mxic_ecc_enable_int(mxic);
ret = wait_for_completion_timeout(&mxic->complete,
msecs_to_jiffies(1000));
ret = ret ? 0 : -ETIMEDOUT;
mxic_ecc_disable_int(mxic);
} else {
ret = readl_poll_timeout(mxic->regs + INTRPT_STS, val,
val & TRANS_CMPLT, 10, USEC_PER_SEC);
writel(val, mxic->regs + INTRPT_STS);
}
if (ret) {
dev_err(mxic->dev, "Timeout on data xfer completion\n");
return -ETIMEDOUT;
}
return 0;
}
static int mxic_ecc_process_data(struct mxic_ecc_engine *mxic,
unsigned int direction)
{
unsigned int dir = (direction == NAND_PAGE_READ) ?
READ_NAND : WRITE_NAND;
int ret;
mxic_ecc_enable_engine(mxic);
/* Trigger processing */
writel(SDMA_STRT | dir, mxic->regs + SDMA_CTRL);
/* Wait for completion */
ret = mxic_ecc_data_xfer_wait_for_completion(mxic);
mxic_ecc_disable_engine(mxic);
return ret;
}
int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
unsigned int direction, dma_addr_t dirmap)
{
struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng);
if (dirmap)
writel(dirmap, mxic->regs + HC_SLV_ADDR);
return mxic_ecc_process_data(mxic, direction);
}
EXPORT_SYMBOL_GPL(mxic_ecc_process_data_pipelined);
static void mxic_ecc_extract_status_bytes(struct mxic_ecc_ctx *ctx)
{
u8 *buf = ctx->oobwithstat;
int next_stat_pos;
int step;
/* Extract the ECC status */
for (step = 0; step < ctx->steps; step++) {
next_stat_pos = ctx->oob_step_sz +
((STAT_BYTES + ctx->oob_step_sz) * step);
ctx->status[step] = buf[next_stat_pos];
}
}
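/*
 * Layout of @oobwithstat as filled by the engine (illustrative, for a
 * two-step page):
 *
 *	| spare chunk 0 | stat 0 (4B) | spare chunk 1 | stat 1 (4B) |
 *
 * The loop above grabs the first status byte of each STAT_BYTES group,
 * located right after every ctx->oob_step_sz-sized spare chunk.
 */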
static void mxic_ecc_reconstruct_oobbuf(struct mxic_ecc_ctx *ctx,
u8 *dst, const u8 *src)
{
int step;
/* Reconstruct the OOB buffer linearly (without the ECC status bytes) */
for (step = 0; step < ctx->steps; step++)
memcpy(dst + (step * ctx->oob_step_sz),
src + (step * (ctx->oob_step_sz + STAT_BYTES)),
ctx->oob_step_sz);
}
static void mxic_ecc_add_room_in_oobbuf(struct mxic_ecc_ctx *ctx,
u8 *dst, const u8 *src)
{
int step;
/* Add some space in the OOB buffer for the status bytes */
for (step = 0; step < ctx->steps; step++)
memcpy(dst + (step * (ctx->oob_step_sz + STAT_BYTES)),
src + (step * ctx->oob_step_sz),
ctx->oob_step_sz);
}
static int mxic_ecc_count_biterrs(struct mxic_ecc_engine *mxic,
struct nand_device *nand)
{
struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
struct mtd_info *mtd = nanddev_to_mtd(nand);
struct device *dev = mxic->dev;
unsigned int max_bf = 0;
bool failure = false;
int step;
for (step = 0; step < ctx->steps; step++) {
u8 stat = ctx->status[step];
if (stat == NO_ERR) {
dev_dbg(dev, "ECC step %d: no error\n", step);
} else if (stat == ERASED_CHUNK) {
dev_dbg(dev, "ECC step %d: erased\n", step);
} else if (stat == UNCORR_ERR || stat > MAX_CORR_ERR) {
dev_dbg(dev, "ECC step %d: uncorrectable\n", step);
mtd->ecc_stats.failed++;
failure = true;
} else {
dev_dbg(dev, "ECC step %d: %d bits corrected\n",
step, stat);
max_bf = max_t(unsigned int, max_bf, stat);
mtd->ecc_stats.corrected += stat;
}
}
return failure ? -EBADMSG : max_bf;
}
/* External ECC engine helpers */
static int mxic_ecc_prepare_io_req_external(struct nand_device *nand,
struct nand_page_io_req *req)
{
struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
struct mtd_info *mtd = nanddev_to_mtd(nand);
int offset, nents, step, ret;
if (req->mode == MTD_OPS_RAW)
return 0;
nand_ecc_tweak_req(&ctx->req_ctx, req);
ctx->req = req;
if (req->type == NAND_PAGE_READ)
return 0;
mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat,
ctx->req->oobbuf.out);
sg_set_buf(&ctx->sg[0], req->databuf.out, req->datalen);
sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
req->ooblen + (ctx->steps * STAT_BYTES));
nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
if (!nents)
return -EINVAL;
mutex_lock(&mxic->lock);
for (step = 0; step < ctx->steps; step++) {
writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
mxic->regs + SDMA_MAIN_ADDR);
writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
mxic->regs + SDMA_SPARE_ADDR);
ret = mxic_ecc_process_data(mxic, ctx->req->type);
if (ret)
break;
}
mutex_unlock(&mxic->lock);
dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
if (ret)
return ret;
/* Retrieve the calculated ECC bytes */
for (step = 0; step < ctx->steps; step++) {
offset = ctx->meta_sz + (step * ctx->oob_step_sz);
mtd_ooblayout_get_eccbytes(mtd,
(u8 *)ctx->req->oobbuf.out + offset,
ctx->oobwithstat + (step * STAT_BYTES),
step * ctx->parity_sz,
ctx->parity_sz);
}
return 0;
}
static int mxic_ecc_finish_io_req_external(struct nand_device *nand,
struct nand_page_io_req *req)
{
struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
int nents, step, ret;
if (req->mode == MTD_OPS_RAW)
return 0;
if (req->type == NAND_PAGE_WRITE) {
nand_ecc_restore_req(&ctx->req_ctx, req);
return 0;
}
/* Copy the OOB buffer and add room for the ECC engine status bytes */
mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in);
sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen);
sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
req->ooblen + (ctx->steps * STAT_BYTES));
nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
if (!nents)
return -EINVAL;
mutex_lock(&mxic->lock);
for (step = 0; step < ctx->steps; step++) {
writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
mxic->regs + SDMA_MAIN_ADDR);
writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
mxic->regs + SDMA_SPARE_ADDR);
ret = mxic_ecc_process_data(mxic, ctx->req->type);
if (ret)
break;
}
mutex_unlock(&mxic->lock);
dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
if (ret) {
nand_ecc_restore_req(&ctx->req_ctx, req);
return ret;
}
/* Extract the status bytes and reconstruct the buffer */
mxic_ecc_extract_status_bytes(ctx);
mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in, ctx->oobwithstat);
nand_ecc_restore_req(&ctx->req_ctx, req);
return mxic_ecc_count_biterrs(mxic, nand);
}
/* Pipelined ECC engine helpers */
static int mxic_ecc_prepare_io_req_pipelined(struct nand_device *nand,
struct nand_page_io_req *req)
{
struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
int nents;
if (req->mode == MTD_OPS_RAW)
return 0;
nand_ecc_tweak_req(&ctx->req_ctx, req);
ctx->req = req;
/* Copy the OOB buffer and add room for the ECC engine status bytes */
mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in);
sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen);
sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
req->ooblen + (ctx->steps * STAT_BYTES));
nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
if (!nents)
return -EINVAL;
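/*
 * Note: the lock taken below is only released by the matching
 * ->finish_io_req() call, keeping the SDMA addresses programmed here
 * stable while the host controller performs the pipelined transfer.
 */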
mutex_lock(&mxic->lock);
writel(sg_dma_address(&ctx->sg[0]), mxic->regs + SDMA_MAIN_ADDR);
writel(sg_dma_address(&ctx->sg[1]), mxic->regs + SDMA_SPARE_ADDR);
return 0;
}
static int mxic_ecc_finish_io_req_pipelined(struct nand_device *nand,
struct nand_page_io_req *req)
{
struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
int ret = 0;
if (req->mode == MTD_OPS_RAW)
return 0;
mutex_unlock(&mxic->lock);
dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
if (req->type == NAND_PAGE_READ) {
mxic_ecc_extract_status_bytes(ctx);
mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in,
ctx->oobwithstat);
ret = mxic_ecc_count_biterrs(mxic, nand);
}
nand_ecc_restore_req(&ctx->req_ctx, req);
return ret;
}
static struct nand_ecc_engine_ops mxic_ecc_engine_external_ops = {
.init_ctx = mxic_ecc_init_ctx_external,
.cleanup_ctx = mxic_ecc_cleanup_ctx,
.prepare_io_req = mxic_ecc_prepare_io_req_external,
.finish_io_req = mxic_ecc_finish_io_req_external,
};
static struct nand_ecc_engine_ops mxic_ecc_engine_pipelined_ops = {
.init_ctx = mxic_ecc_init_ctx_pipelined,
.cleanup_ctx = mxic_ecc_cleanup_ctx,
.prepare_io_req = mxic_ecc_prepare_io_req_pipelined,
.finish_io_req = mxic_ecc_finish_io_req_pipelined,
};
struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void)
{
return &mxic_ecc_engine_pipelined_ops;
}
EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_ops);
static struct platform_device *
mxic_ecc_get_pdev(struct platform_device *spi_pdev)
{
struct platform_device *eng_pdev;
struct device_node *np;
/* Retrieve the nand-ecc-engine phandle */
np = of_parse_phandle(spi_pdev->dev.of_node, "nand-ecc-engine", 0);
if (!np)
return NULL;
/* Jump to the engine's device node */
eng_pdev = of_find_device_by_node(np);
of_node_put(np);
return eng_pdev;
}
void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng)
{
struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng);
platform_device_put(to_platform_device(mxic->dev));
}
EXPORT_SYMBOL_GPL(mxic_ecc_put_pipelined_engine);
struct nand_ecc_engine *
mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev)
{
struct platform_device *eng_pdev;
struct mxic_ecc_engine *mxic;
eng_pdev = mxic_ecc_get_pdev(spi_pdev);
if (!eng_pdev)
return ERR_PTR(-ENODEV);
mxic = platform_get_drvdata(eng_pdev);
if (!mxic) {
platform_device_put(eng_pdev);
return ERR_PTR(-EPROBE_DEFER);
}
return &mxic->pipelined_engine;
}
EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_engine);
/*
* Only the external ECC engine is exported as the pipelined one is SoC specific, so
* it is registered directly by the drivers that wrap it.
*/
static int mxic_ecc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mxic_ecc_engine *mxic;
int ret;
mxic = devm_kzalloc(&pdev->dev, sizeof(*mxic), GFP_KERNEL);
if (!mxic)
return -ENOMEM;
mxic->dev = &pdev->dev;
/*
* Both memory regions for the ECC engine itself and the AXI slave
* address are mandatory.
*/
mxic->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mxic->regs)) {
dev_err(&pdev->dev, "Missing memory region\n");
return PTR_ERR(mxic->regs);
}
mxic_ecc_disable_engine(mxic);
mxic_ecc_disable_int(mxic);
/* IRQ is optional yet much more efficient */
mxic->irq = platform_get_irq_byname_optional(pdev, "ecc-engine");
if (mxic->irq > 0) {
ret = devm_request_irq(&pdev->dev, mxic->irq, mxic_ecc_isr, 0,
"mxic-ecc", mxic);
if (ret)
return ret;
} else {
dev_info(dev, "Invalid or missing IRQ, fallback to polling\n");
mxic->irq = 0;
}
mutex_init(&mxic->lock);
/*
* In external mode, the device is the ECC engine. In pipelined mode,
* the device is the host controller. The device is used to match the
* right ECC engine based on the DT properties.
*/
mxic->external_engine.dev = &pdev->dev;
mxic->external_engine.integration = NAND_ECC_ENGINE_INTEGRATION_EXTERNAL;
mxic->external_engine.ops = &mxic_ecc_engine_external_ops;
nand_ecc_register_on_host_hw_engine(&mxic->external_engine);
platform_set_drvdata(pdev, mxic);
return 0;
}
static void mxic_ecc_remove(struct platform_device *pdev)
{
struct mxic_ecc_engine *mxic = platform_get_drvdata(pdev);
nand_ecc_unregister_on_host_hw_engine(&mxic->external_engine);
}
static const struct of_device_id mxic_ecc_of_ids[] = {
{
.compatible = "mxicy,nand-ecc-engine-rev3",
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mxic_ecc_of_ids);
static struct platform_driver mxic_ecc_driver = {
.driver = {
.name = "mxic-nand-ecc-engine",
.of_match_table = mxic_ecc_of_ids,
},
.probe = mxic_ecc_probe,
.remove_new = mxic_ecc_remove,
};
module_platform_driver(mxic_ecc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <[email protected]>");
MODULE_DESCRIPTION("Macronix NAND hardware ECC controller");
| linux-master | drivers/mtd/nand/ecc-mxic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017 Free Electrons
*
* Authors:
* Boris Brezillon <[email protected]>
* Peter Pan <[email protected]>
*/
#define pr_fmt(fmt) "nand: " fmt
#include <linux/module.h>
#include <linux/mtd/nand.h>
/**
* nanddev_isbad() - Check if a block is bad
* @nand: NAND device
* @pos: position pointing to the block we want to check
*
* Return: true if the block is bad, false otherwise.
*/
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
if (mtd_check_expert_analysis_mode())
return false;
if (nanddev_bbt_is_initialized(nand)) {
unsigned int entry;
int status;
entry = nanddev_bbt_pos_to_entry(nand, pos);
status = nanddev_bbt_get_block_status(nand, entry);
/* Lazy block status retrieval */
if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
if (nand->ops->isbad(nand, pos))
status = NAND_BBT_BLOCK_FACTORY_BAD;
else
status = NAND_BBT_BLOCK_GOOD;
nanddev_bbt_set_block_status(nand, entry, status);
}
if (status == NAND_BBT_BLOCK_WORN ||
status == NAND_BBT_BLOCK_FACTORY_BAD)
return true;
return false;
}
return nand->ops->isbad(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_isbad);
/**
* nanddev_markbad() - Mark a block as bad
* @nand: NAND device
* @pos: position of the block to mark bad
*
* Mark a block bad. This function updates the BBT if available and
* calls the low-level markbad hook (nand->ops->markbad()).
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
struct mtd_info *mtd = nanddev_to_mtd(nand);
unsigned int entry;
int ret = 0;
if (nanddev_isbad(nand, pos))
return 0;
ret = nand->ops->markbad(nand, pos);
if (ret)
pr_warn("failed to write BBM to block @%llx (err = %d)\n",
nanddev_pos_to_offs(nand, pos), ret);
if (!nanddev_bbt_is_initialized(nand))
goto out;
entry = nanddev_bbt_pos_to_entry(nand, pos);
ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
if (ret)
goto out;
ret = nanddev_bbt_update(nand);
out:
if (!ret)
mtd->ecc_stats.badblocks++;
return ret;
}
EXPORT_SYMBOL_GPL(nanddev_markbad);
/**
* nanddev_isreserved() - Check whether an eraseblock is reserved or not
* @nand: NAND device
* @pos: NAND position to test
*
* Checks whether the eraseblock pointed by @pos is reserved or not.
*
* Return: true if the eraseblock is reserved, false otherwise.
*/
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
{
unsigned int entry;
int status;
if (!nanddev_bbt_is_initialized(nand))
return false;
/* Return info from the table */
entry = nanddev_bbt_pos_to_entry(nand, pos);
status = nanddev_bbt_get_block_status(nand, entry);
return status == NAND_BBT_BLOCK_RESERVED;
}
EXPORT_SYMBOL_GPL(nanddev_isreserved);
/**
* nanddev_erase() - Erase a NAND portion
* @nand: NAND device
* @pos: position of the block to erase
*
* Erases the block if it's not bad.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
static int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
{
if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
pr_warn("attempt to erase a bad/reserved block @%llx\n",
nanddev_pos_to_offs(nand, pos));
return -EIO;
}
return nand->ops->erase(nand, pos);
}
/**
* nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
* @mtd: MTD device
* @einfo: erase request
*
* This is a simple mtd->_erase() implementation iterating over all blocks
* concerned by @einfo and calling nand->ops->erase() on each of them.
*
* Note that mtd->_erase should not be directly assigned to this helper,
* because there's no locking here. NAND specialized layers should instead
* implement their own wrapper around nanddev_mtd_erase() taking the
* appropriate lock before calling nanddev_mtd_erase().
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
struct nand_pos pos, last;
int ret;
nanddev_offs_to_pos(nand, einfo->addr, &pos);
nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
while (nanddev_pos_cmp(&pos, &last) <= 0) {
ret = nanddev_erase(nand, &pos);
if (ret) {
einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
return ret;
}
nanddev_pos_next_eraseblock(nand, &pos);
}
return 0;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
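/*
 * Illustrative wrapper (not part of this file): a specialized layer with
 * its own lock, here the hypothetical "mylayer", could do:
 *
 *	static int mylayer_mtd_erase(struct mtd_info *mtd,
 *				     struct erase_info *einfo)
 *	{
 *		struct mylayer *l = mtd_to_mylayer(mtd);	(hypothetical)
 *		int ret;
 *
 *		mutex_lock(&l->lock);
 *		ret = nanddev_mtd_erase(mtd, einfo);
 *		mutex_unlock(&l->lock);
 *		return ret;
 *	}
 */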
/**
* nanddev_mtd_max_bad_blocks() - Get the maximum number of bad eraseblocks on
* a specific region of the NAND device
* @mtd: MTD device
* @offs: offset of the NAND region
* @len: length of the NAND region
*
* Default implementation for mtd->_max_bad_blocks(). Only works if
* nand->memorg.max_bad_eraseblocks_per_lun is > 0.
*
* Return: a positive number encoding the maximum number of bad eraseblocks on a
* portion of memory, a negative error code otherwise.
*/
int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
struct nand_pos pos, end;
unsigned int max_bb = 0;
if (!nand->memorg.max_bad_eraseblocks_per_lun)
return -ENOTSUPP;
nanddev_offs_to_pos(nand, offs, &pos);
nanddev_offs_to_pos(nand, offs + len, &end);
for (nanddev_offs_to_pos(nand, offs, &pos);
nanddev_pos_cmp(&pos, &end) < 0;
nanddev_pos_next_lun(nand, &pos))
max_bb += nand->memorg.max_bad_eraseblocks_per_lun;
return max_bb;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks);
/**
* nanddev_get_ecc_engine() - Find and get a suitable ECC engine
* @nand: NAND device
*/
static int nanddev_get_ecc_engine(struct nand_device *nand)
{
int engine_type;
/* Read the user desires in terms of ECC engine/configuration */
of_get_nand_ecc_user_config(nand);
engine_type = nand->ecc.user_conf.engine_type;
if (engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
engine_type = nand->ecc.defaults.engine_type;
switch (engine_type) {
case NAND_ECC_ENGINE_TYPE_NONE:
return 0;
case NAND_ECC_ENGINE_TYPE_SOFT:
nand->ecc.engine = nand_ecc_get_sw_engine(nand);
break;
case NAND_ECC_ENGINE_TYPE_ON_DIE:
nand->ecc.engine = nand_ecc_get_on_die_hw_engine(nand);
break;
case NAND_ECC_ENGINE_TYPE_ON_HOST:
nand->ecc.engine = nand_ecc_get_on_host_hw_engine(nand);
if (PTR_ERR(nand->ecc.engine) == -EPROBE_DEFER)
return -EPROBE_DEFER;
break;
default:
pr_err("Missing ECC engine type\n");
}
if (!nand->ecc.engine)
return -EINVAL;
return 0;
}
/**
* nanddev_put_ecc_engine() - Detach and put the in-use ECC engine
* @nand: NAND device
*/
static int nanddev_put_ecc_engine(struct nand_device *nand)
{
switch (nand->ecc.ctx.conf.engine_type) {
case NAND_ECC_ENGINE_TYPE_ON_HOST:
nand_ecc_put_on_host_hw_engine(nand);
break;
case NAND_ECC_ENGINE_TYPE_NONE:
case NAND_ECC_ENGINE_TYPE_SOFT:
case NAND_ECC_ENGINE_TYPE_ON_DIE:
default:
break;
}
return 0;
}
/**
* nanddev_find_ecc_configuration() - Find a suitable ECC configuration
* @nand: NAND device
*/
static int nanddev_find_ecc_configuration(struct nand_device *nand)
{
int ret;
if (!nand->ecc.engine)
return -ENOTSUPP;
ret = nand_ecc_init_ctx(nand);
if (ret)
return ret;
if (!nand_ecc_is_strong_enough(nand))
pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
nand->mtd.name);
return 0;
}
/**
* nanddev_ecc_engine_init() - Initialize an ECC engine for the chip
* @nand: NAND device
*/
int nanddev_ecc_engine_init(struct nand_device *nand)
{
int ret;
/* Look for the ECC engine to use */
ret = nanddev_get_ecc_engine(nand);
if (ret) {
if (ret != -EPROBE_DEFER)
pr_err("No ECC engine found\n");
return ret;
}
/* No ECC engine requested */
if (!nand->ecc.engine)
return 0;
/* Configure the engine: balance user input and chip requirements */
ret = nanddev_find_ecc_configuration(nand);
if (ret) {
pr_err("No suitable ECC configuration\n");
nanddev_put_ecc_engine(nand);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(nanddev_ecc_engine_init);
/**
* nanddev_ecc_engine_cleanup() - Cleanup ECC engine initializations
* @nand: NAND device
*/
void nanddev_ecc_engine_cleanup(struct nand_device *nand)
{
if (nand->ecc.engine)
nand_ecc_cleanup_ctx(nand);
nanddev_put_ecc_engine(nand);
}
EXPORT_SYMBOL_GPL(nanddev_ecc_engine_cleanup);
/**
* nanddev_init() - Initialize a NAND device
* @nand: NAND device
* @ops: NAND device operations
* @owner: NAND device owner
*
* Initializes a NAND device object. Consistency checks are done on @ops and
* @nand->memorg. Also takes care of initializing the BBT.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
struct module *owner)
{
struct mtd_info *mtd;
struct nand_memory_organization *memorg;
if (!nand || !ops)
return -EINVAL;
if (!ops->erase || !ops->markbad || !ops->isbad)
return -EINVAL;
mtd = nanddev_to_mtd(nand);
memorg = nanddev_get_memorg(nand);
if (!memorg->bits_per_cell || !memorg->pagesize ||
!memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
!memorg->planes_per_lun || !memorg->luns_per_target ||
!memorg->ntargets)
return -EINVAL;
nand->rowconv.eraseblock_addr_shift =
fls(memorg->pages_per_eraseblock - 1);
nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
nand->rowconv.eraseblock_addr_shift;
nand->ops = ops;
mtd->type = memorg->bits_per_cell == 1 ?
MTD_NANDFLASH : MTD_MLCNANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
mtd->writesize = memorg->pagesize;
mtd->writebufsize = memorg->pagesize;
mtd->oobsize = memorg->oobsize;
mtd->size = nanddev_size(nand);
mtd->owner = owner;
return nanddev_bbt_init(nand);
}
EXPORT_SYMBOL_GPL(nanddev_init);
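/*
 * Minimal usage sketch (hypothetical driver code, assuming a populated
 * nand->memorg and a struct nand_ops providing erase/markbad/isbad):
 *
 *	static const struct nand_ops my_ops = {
 *		.erase = my_erase,
 *		.markbad = my_markbad,
 *		.isbad = my_isbad,
 *	};
 *
 *	ret = nanddev_init(nand, &my_ops, THIS_MODULE);
 */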
/**
* nanddev_cleanup() - Release resources allocated in nanddev_init()
* @nand: NAND device
*
* Basically undoes what has been done in nanddev_init().
*/
void nanddev_cleanup(struct nand_device *nand)
{
if (nanddev_bbt_is_initialized(nand))
nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);
MODULE_DESCRIPTION("Generic NAND framework");
MODULE_AUTHOR("Boris Brezillon <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mtd/nand/core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* This file contains an ECC algorithm that detects and corrects 1 bit
* errors in a 256 byte block of data.
*
* Copyright © 2008 Koninklijke Philips Electronics NV.
* Author: Frans Meulenbroeks
*
* Completely replaces the previous ECC implementation which was written by:
* Steven J. Hill ([email protected])
* Thomas Gleixner ([email protected])
*
* Information on how this algorithm works and how it was developed
* can be found in Documentation/driver-api/mtd/nand_ecc.rst
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
/*
* invparity is a 256 byte table that contains the odd parity
* for each byte. So if the number of bits in a byte is even,
* the array element is 1, and when the number of bits is odd
the array element is 0.
*/
static const char invparity[256] = {
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
};
/*
* bitsperbyte contains the number of bits per byte
* this is only used for testing and repairing parity
* (a precalculated value slightly improves performance)
*/
static const char bitsperbyte[256] = {
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
};
/*
* addressbits is a lookup table to filter out the bits from the xor-ed
* ECC data that identify the faulty location.
* this is only used for repairing parity
* see the comments in nand_ecc_sw_hamming_correct for more details
*/
static const char addressbits[256] = {
0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
};
int ecc_sw_hamming_calculate(const unsigned char *buf, unsigned int step_size,
unsigned char *code, bool sm_order)
{
const u32 *bp = (const u32 *)buf;
const u32 eccsize_mult = (step_size == 256) ? 1 : 2;
/* current value in buffer */
u32 cur;
/* rp0..rp17 are the various accumulated parities (per byte) */
u32 rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7, rp8, rp9, rp10, rp11, rp12,
rp13, rp14, rp15, rp16, rp17;
/* Cumulative parity for all data */
u32 par;
/* Cumulative parity at the end of the loop (rp12, rp14, rp16) */
u32 tmppar;
int i;
par = 0;
rp4 = 0;
rp6 = 0;
rp8 = 0;
rp10 = 0;
rp12 = 0;
rp14 = 0;
rp16 = 0;
rp17 = 0;
/*
* The loop is unrolled a number of times; this avoids if statements
* deciding which rp value to update. We also process the data by
* longwords. Note: passing unaligned data might give a performance
* penalty; it is assumed that the buffers are aligned.
* tmppar is the cumulative parity of this iteration; it is needed
* for calculating rp12, rp14, rp16 and par, and is also used as a
* performance improvement for rp6, rp8 and rp10.
*/
for (i = 0; i < eccsize_mult << 2; i++) {
cur = *bp++;
tmppar = cur;
rp4 ^= cur;
cur = *bp++;
tmppar ^= cur;
rp6 ^= tmppar;
cur = *bp++;
tmppar ^= cur;
rp4 ^= cur;
cur = *bp++;
tmppar ^= cur;
rp8 ^= tmppar;
cur = *bp++;
tmppar ^= cur;
rp4 ^= cur;
rp6 ^= cur;
cur = *bp++;
tmppar ^= cur;
rp6 ^= cur;
cur = *bp++;
tmppar ^= cur;
rp4 ^= cur;
cur = *bp++;
tmppar ^= cur;
rp10 ^= tmppar;
cur = *bp++;
tmppar ^= cur;
rp4 ^= cur;
rp6 ^= cur;
rp8 ^= cur;
cur = *bp++;
tmppar ^= cur;
rp6 ^= cur;
rp8 ^= cur;
cur = *bp++;
tmppar ^= cur;
rp4 ^= cur;
rp8 ^= cur;
cur = *bp++;
tmppar ^= cur;
rp8 ^= cur;
cur = *bp++;
tmppar ^= cur;
rp4 ^= cur;
rp6 ^= cur;
cur = *bp++;
tmppar ^= cur;
rp6 ^= cur;
cur = *bp++;
tmppar ^= cur;
rp4 ^= cur;
cur = *bp++;
tmppar ^= cur;
par ^= tmppar;
if ((i & 0x1) == 0)
rp12 ^= tmppar;
if ((i & 0x2) == 0)
rp14 ^= tmppar;
if (eccsize_mult == 2 && (i & 0x4) == 0)
rp16 ^= tmppar;
}
/*
* Handle the fact that we used longword operations: bring
* rp4..rp14..rp16 back to single byte entities by shifting and
* xoring. First fold the upper and lower 16 bits, then the upper
* and lower 8 bits.
*/
rp4 ^= (rp4 >> 16);
rp4 ^= (rp4 >> 8);
rp4 &= 0xff;
rp6 ^= (rp6 >> 16);
rp6 ^= (rp6 >> 8);
rp6 &= 0xff;
rp8 ^= (rp8 >> 16);
rp8 ^= (rp8 >> 8);
rp8 &= 0xff;
rp10 ^= (rp10 >> 16);
rp10 ^= (rp10 >> 8);
rp10 &= 0xff;
rp12 ^= (rp12 >> 16);
rp12 ^= (rp12 >> 8);
rp12 &= 0xff;
rp14 ^= (rp14 >> 16);
rp14 ^= (rp14 >> 8);
rp14 &= 0xff;
if (eccsize_mult == 2) {
rp16 ^= (rp16 >> 16);
rp16 ^= (rp16 >> 8);
rp16 &= 0xff;
}
/*
* we also need to calculate the row parity for rp0..rp3
* This is present in par, because par is now
* rp3 rp3 rp2 rp2 in little endian and
* rp2 rp2 rp3 rp3 in big endian
* as well as
* rp1 rp0 rp1 rp0 in little endian and
* rp0 rp1 rp0 rp1 in big endian
* First calculate rp2 and rp3
*/
#ifdef __BIG_ENDIAN
rp2 = (par >> 16);
rp2 ^= (rp2 >> 8);
rp2 &= 0xff;
rp3 = par & 0xffff;
rp3 ^= (rp3 >> 8);
rp3 &= 0xff;
#else
rp3 = (par >> 16);
rp3 ^= (rp3 >> 8);
rp3 &= 0xff;
rp2 = par & 0xffff;
rp2 ^= (rp2 >> 8);
rp2 &= 0xff;
#endif
/* reduce par to 16 bits then calculate rp1 and rp0 */
par ^= (par >> 16);
#ifdef __BIG_ENDIAN
rp0 = (par >> 8) & 0xff;
rp1 = (par & 0xff);
#else
rp1 = (par >> 8) & 0xff;
rp0 = (par & 0xff);
#endif
/* finally reduce par to 8 bits */
par ^= (par >> 8);
par &= 0xff;
/*
* and calculate rp5..rp15..rp17.
* Note that par = rp4 ^ rp5; due to the commutative property
* of the ^ operator we can say:
* rp5 = (par ^ rp4);
* The & 0xff seems superfluous, but benchmarking showed that
* leaving it out gives slightly worse results. No idea why; probably
* it has to do with the way the Pentium pipeline is organized.
*/
rp5 = (par ^ rp4) & 0xff;
rp7 = (par ^ rp6) & 0xff;
rp9 = (par ^ rp8) & 0xff;
rp11 = (par ^ rp10) & 0xff;
rp13 = (par ^ rp12) & 0xff;
rp15 = (par ^ rp14) & 0xff;
if (eccsize_mult == 2)
rp17 = (par ^ rp16) & 0xff;
/*
* Finally calculate the ECC bits.
* Again, it might seem that there are performance optimisations
* possible, but benchmarks showed that on the system this was
* developed on, the code below is the fastest.
*/
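/*
 * Resulting layout (read off the code below): code[0] and code[1] hold
 * the inverted row parities rp0..rp7 and rp8..rp15 (their order swaps
 * with sm_order); code[2] holds six column parities computed from par
 * masked with 0xf0/0x0f, 0xcc/0x33 and 0xaa/0x55, with the two low
 * bits fixed to 1 for 256-byte steps or carrying rp17/rp16 for
 * 512-byte steps.
 */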
if (sm_order) {
code[0] = (invparity[rp7] << 7) | (invparity[rp6] << 6) |
(invparity[rp5] << 5) | (invparity[rp4] << 4) |
(invparity[rp3] << 3) | (invparity[rp2] << 2) |
(invparity[rp1] << 1) | (invparity[rp0]);
code[1] = (invparity[rp15] << 7) | (invparity[rp14] << 6) |
(invparity[rp13] << 5) | (invparity[rp12] << 4) |
(invparity[rp11] << 3) | (invparity[rp10] << 2) |
(invparity[rp9] << 1) | (invparity[rp8]);
} else {
code[1] = (invparity[rp7] << 7) | (invparity[rp6] << 6) |
(invparity[rp5] << 5) | (invparity[rp4] << 4) |
(invparity[rp3] << 3) | (invparity[rp2] << 2) |
(invparity[rp1] << 1) | (invparity[rp0]);
code[0] = (invparity[rp15] << 7) | (invparity[rp14] << 6) |
(invparity[rp13] << 5) | (invparity[rp12] << 4) |
(invparity[rp11] << 3) | (invparity[rp10] << 2) |
(invparity[rp9] << 1) | (invparity[rp8]);
}
if (eccsize_mult == 1)
code[2] =
(invparity[par & 0xf0] << 7) |
(invparity[par & 0x0f] << 6) |
(invparity[par & 0xcc] << 5) |
(invparity[par & 0x33] << 4) |
(invparity[par & 0xaa] << 3) |
(invparity[par & 0x55] << 2) |
3;
else
code[2] =
(invparity[par & 0xf0] << 7) |
(invparity[par & 0x0f] << 6) |
(invparity[par & 0xcc] << 5) |
(invparity[par & 0x33] << 4) |
(invparity[par & 0xaa] << 3) |
(invparity[par & 0x55] << 2) |
(invparity[rp17] << 1) |
(invparity[rp16] << 0);
return 0;
}
EXPORT_SYMBOL(ecc_sw_hamming_calculate);
/**
* nand_ecc_sw_hamming_calculate - Calculate 3-byte ECC for 256/512-byte block
* @nand: NAND device
* @buf: Input buffer with raw data
* @code: Output buffer with ECC
*/
int nand_ecc_sw_hamming_calculate(struct nand_device *nand,
const unsigned char *buf, unsigned char *code)
{
struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv;
unsigned int step_size = nand->ecc.ctx.conf.step_size;
bool sm_order = engine_conf ? engine_conf->sm_order : false;
return ecc_sw_hamming_calculate(buf, step_size, code, sm_order);
}
EXPORT_SYMBOL(nand_ecc_sw_hamming_calculate);
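/*
 * Minimal usage sketch (hypothetical buffer names): for a 256-byte
 * data block in Smart Media order disabled mode, the three resulting
 * ECC bytes land in code[]:
 *
 *	u8 code[3];
 *	ecc_sw_hamming_calculate(data, 256, code, false);
 */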
int ecc_sw_hamming_correct(unsigned char *buf, unsigned char *read_ecc,
unsigned char *calc_ecc, unsigned int step_size,
bool sm_order)
{
const u32 eccsize_mult = step_size >> 8;
unsigned char b0, b1, b2, bit_addr;
unsigned int byte_addr;
/*
* b0 to b2 indicate which bit is faulty (if any)
* we might need the xor result more than once,
* so keep them in a local var
*/
if (sm_order) {
b0 = read_ecc[0] ^ calc_ecc[0];
b1 = read_ecc[1] ^ calc_ecc[1];
} else {
b0 = read_ecc[1] ^ calc_ecc[1];
b1 = read_ecc[0] ^ calc_ecc[0];
}
b2 = read_ecc[2] ^ calc_ecc[2];
/* check if there are any bitfaults */
/* repeated if statements are slightly more efficient than switch ... */
/* ordered in order of likelihood */
if ((b0 | b1 | b2) == 0)
return 0; /* no error */
if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
(((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) ||
(eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) {
/* single bit error */
/*
* rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the faulty
* byte, cp 5/3/1 indicate the faulty bit.
* A lookup table (called addressbits) is used to filter
* the bits from the byte they are in.
* A marginal optimisation is possible by having three
* different lookup tables.
* One as we have now (for b0), one for b2
* (that would avoid the >> 1), and one for b1 (with all values
* << 4). However it was felt that introducing two more tables
* hardly justifies the gain.
*
* The b2 shift is there to get rid of the lowest two bits.
* We could also do addressbits[b2] >> 1 but for the
* performance it does not make any difference
*/
if (eccsize_mult == 1)
byte_addr = (addressbits[b1] << 4) + addressbits[b0];
else
byte_addr = (addressbits[b2 & 0x3] << 8) +
(addressbits[b1] << 4) + addressbits[b0];
bit_addr = addressbits[b2 >> 2];
/* flip the bit */
buf[byte_addr] ^= (1 << bit_addr);
return 1;
}
/* count nr of bits; use table lookup, faster than calculating it */
if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
return 1; /* error in ECC data; no action needed */
pr_err("%s: uncorrectable ECC error\n", __func__);
return -EBADMSG;
}
EXPORT_SYMBOL(ecc_sw_hamming_correct);
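/*
 * Minimal round-trip sketch (hypothetical buffers): recompute the ECC
 * over the data read back and let the corrector compare it with the
 * ECC bytes stored on flash. 0 means clean, 1 means one bit was fixed
 * (in the data or in the ECC itself), -EBADMSG means uncorrectable:
 *
 *	u8 calc[3];
 *	ecc_sw_hamming_calculate(data, 256, calc, false);
 *	ret = ecc_sw_hamming_correct(data, stored, calc, 256, false);
 */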
/**
* nand_ecc_sw_hamming_correct - Detect and correct bit error(s)
* @nand: NAND device
* @buf: Raw data read from the chip
* @read_ecc: ECC bytes read from the chip
* @calc_ecc: ECC calculated from the raw data
*
* Detect and correct up to 1 bit error per 256/512-byte block.
*/
int nand_ecc_sw_hamming_correct(struct nand_device *nand, unsigned char *buf,
unsigned char *read_ecc,
unsigned char *calc_ecc)
{
struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv;
unsigned int step_size = nand->ecc.ctx.conf.step_size;
bool sm_order = engine_conf ? engine_conf->sm_order : false;
return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc, step_size,
sm_order);
}
EXPORT_SYMBOL(nand_ecc_sw_hamming_correct);
int nand_ecc_sw_hamming_init_ctx(struct nand_device *nand)
{
struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
struct nand_ecc_sw_hamming_conf *engine_conf;
struct mtd_info *mtd = nanddev_to_mtd(nand);
int ret;
if (!mtd->ooblayout) {
switch (mtd->oobsize) {
case 8:
case 16:
mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
break;
case 64:
case 128:
mtd_set_ooblayout(mtd,
nand_get_large_page_hamming_ooblayout());
break;
default:
return -ENOTSUPP;
}
}
conf->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
conf->algo = NAND_ECC_ALGO_HAMMING;
conf->step_size = nand->ecc.user_conf.step_size;
conf->strength = 1;
/* Use the strongest configuration by default */
if (conf->step_size != 256 && conf->step_size != 512)
conf->step_size = 256;
engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
if (!engine_conf)
return -ENOMEM;
ret = nand_ecc_init_req_tweaking(&engine_conf->req_ctx, nand);
if (ret)
goto free_engine_conf;
engine_conf->code_size = 3;
engine_conf->calc_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
engine_conf->code_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
if (!engine_conf->calc_buf || !engine_conf->code_buf) {
ret = -ENOMEM;
goto free_bufs;
}
nand->ecc.ctx.priv = engine_conf;
nand->ecc.ctx.nsteps = mtd->writesize / conf->step_size;
nand->ecc.ctx.total = nand->ecc.ctx.nsteps * engine_conf->code_size;
return 0;
free_bufs:
nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx);
kfree(engine_conf->calc_buf);
kfree(engine_conf->code_buf);
free_engine_conf:
kfree(engine_conf);
return ret;
}
EXPORT_SYMBOL(nand_ecc_sw_hamming_init_ctx);
void nand_ecc_sw_hamming_cleanup_ctx(struct nand_device *nand)
{
struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv;
if (engine_conf) {
nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx);
kfree(engine_conf->calc_buf);
kfree(engine_conf->code_buf);
kfree(engine_conf);
}
}
EXPORT_SYMBOL(nand_ecc_sw_hamming_cleanup_ctx);
static int nand_ecc_sw_hamming_prepare_io_req(struct nand_device *nand,
struct nand_page_io_req *req)
{
struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv;
struct mtd_info *mtd = nanddev_to_mtd(nand);
int eccsize = nand->ecc.ctx.conf.step_size;
int eccbytes = engine_conf->code_size;
int eccsteps = nand->ecc.ctx.nsteps;
int total = nand->ecc.ctx.total;
u8 *ecccalc = engine_conf->calc_buf;
const u8 *data;
int i;
/* Nothing to do for a raw operation */
if (req->mode == MTD_OPS_RAW)
return 0;
/* This engine does not provide BBM/free OOB bytes protection */
if (!req->datalen)
return 0;
nand_ecc_tweak_req(&engine_conf->req_ctx, req);
/* No more preparation for page read */
if (req->type == NAND_PAGE_READ)
return 0;
/* Preparation for page write: derive the ECC bytes and place them */
for (i = 0, data = req->databuf.out;
eccsteps;
eccsteps--, i += eccbytes, data += eccsize)
nand_ecc_sw_hamming_calculate(nand, data, &ecccalc[i]);
return mtd_ooblayout_set_eccbytes(mtd, ecccalc, (void *)req->oobbuf.out,
0, total);
}
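/*
 * Step-count example for the write path above (hypothetical geometry):
 * a 2048-byte page with a 256-byte step size yields nsteps = 8, and
 * with code_size = 3 the loop emits 24 ECC bytes, which
 * mtd_ooblayout_set_eccbytes() scatters into the OOB layout.
 */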
static int nand_ecc_sw_hamming_finish_io_req(struct nand_device *nand,
struct nand_page_io_req *req)
{
struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv;
struct mtd_info *mtd = nanddev_to_mtd(nand);
int eccsize = nand->ecc.ctx.conf.step_size;
int total = nand->ecc.ctx.total;
int eccbytes = engine_conf->code_size;
int eccsteps = nand->ecc.ctx.nsteps;
u8 *ecccalc = engine_conf->calc_buf;
u8 *ecccode = engine_conf->code_buf;
unsigned int max_bitflips = 0;
u8 *data = req->databuf.in;
int i, ret;
/* Nothing to do for a raw operation */
if (req->mode == MTD_OPS_RAW)
return 0;
/* This engine does not provide BBM/free OOB bytes protection */
if (!req->datalen)
return 0;
/* No more preparation for page write */
if (req->type == NAND_PAGE_WRITE) {
nand_ecc_restore_req(&engine_conf->req_ctx, req);
return 0;
}
/* Finish a page read: retrieve the (raw) ECC bytes */
ret = mtd_ooblayout_get_eccbytes(mtd, ecccode, req->oobbuf.in, 0,
total);
if (ret)
return ret;
/* Calculate the ECC bytes */
for (i = 0; eccsteps; eccsteps--, i += eccbytes, data += eccsize)
nand_ecc_sw_hamming_calculate(nand, data, &ecccalc[i]);
/* Finish a page read: compare and correct */
for (eccsteps = nand->ecc.ctx.nsteps, i = 0, data = req->databuf.in;
eccsteps;
eccsteps--, i += eccbytes, data += eccsize) {
int stat = nand_ecc_sw_hamming_correct(nand, data,
&ecccode[i],
&ecccalc[i]);
if (stat < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += stat;
max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
}
nand_ecc_restore_req(&engine_conf->req_ctx, req);
return max_bitflips;
}
static struct nand_ecc_engine_ops nand_ecc_sw_hamming_engine_ops = {
.init_ctx = nand_ecc_sw_hamming_init_ctx,
.cleanup_ctx = nand_ecc_sw_hamming_cleanup_ctx,
.prepare_io_req = nand_ecc_sw_hamming_prepare_io_req,
.finish_io_req = nand_ecc_sw_hamming_finish_io_req,
};
static struct nand_ecc_engine nand_ecc_sw_hamming_engine = {
.ops = &nand_ecc_sw_hamming_engine_ops,
};
struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void)
{
return &nand_ecc_sw_hamming_engine;
}
EXPORT_SYMBOL(nand_ecc_sw_hamming_get_engine);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Frans Meulenbroeks <[email protected]>");
MODULE_DESCRIPTION("NAND software Hamming ECC support");
| linux-master | drivers/mtd/nand/ecc-sw-hamming.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017 Free Electrons
*
* Authors:
* Boris Brezillon <[email protected]>
* Peter Pan <[email protected]>
*/
#define pr_fmt(fmt) "nand-bbt: " fmt
#include <linux/mtd/nand.h>
#include <linux/slab.h>
/**
* nanddev_bbt_init() - Initialize the BBT (Bad Block Table)
* @nand: NAND device
*
* Initialize the in-memory BBT.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int nanddev_bbt_init(struct nand_device *nand)
{
unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
unsigned int nblocks = nanddev_neraseblocks(nand);
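/*
 * Sizing note: fls(NAND_BBT_BLOCK_NUM_STATUS) gives the number of bits
 * needed to encode every block status value, so the cache below holds
 * nblocks entries of bits_per_block bits each, rounded up to longs by
 * bitmap_zalloc().
 */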
nand->bbt.cache = bitmap_zalloc(nblocks * bits_per_block, GFP_KERNEL);
if (!nand->bbt.cache)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_init);
/**
* nanddev_bbt_cleanup() - Cleanup the BBT (Bad Block Table)
* @nand: NAND device
*
* Undoes what has been done in nanddev_bbt_init()
*/
void nanddev_bbt_cleanup(struct nand_device *nand)
{
bitmap_free(nand->bbt.cache);
}
EXPORT_SYMBOL_GPL(nanddev_bbt_cleanup);
/**
* nanddev_bbt_update() - Update a BBT
* @nand: nand device
*
* Update the BBT. Currently a no-op, since the on-flash BBT is not yet
* supported.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int nanddev_bbt_update(struct nand_device *nand)
{
return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_update);
/**
* nanddev_bbt_get_block_status() - Return the status of an eraseblock
* @nand: nand device
* @entry: the BBT entry
*
* Return: a positive nand_bbt_block_status value, or -%ERANGE if @entry
* is beyond the BBT size.
*/
int nanddev_bbt_get_block_status(const struct nand_device *nand,
unsigned int entry)
{
unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
unsigned long *pos = nand->bbt.cache +
((entry * bits_per_block) / BITS_PER_LONG);
unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
unsigned long status;
if (entry >= nanddev_neraseblocks(nand))
return -ERANGE;
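/*
 * Worked example (assuming bits_per_block = 3 and 64-bit longs): entry
 * 21 starts at bit 63 of pos[0], so two of its three bits live in
 * pos[1]; the second read below stitches those high bits back in.
 */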
status = pos[0] >> offs;
if (bits_per_block + offs > BITS_PER_LONG)
status |= pos[1] << (BITS_PER_LONG - offs);
return status & GENMASK(bits_per_block - 1, 0);
}
EXPORT_SYMBOL_GPL(nanddev_bbt_get_block_status);
/**
* nanddev_bbt_set_block_status() - Update the status of an eraseblock in the
* in-memory BBT
* @nand: nand device
* @entry: the BBT entry to update
* @status: the new status
*
* Update an entry of the in-memory BBT. If you want to push the updated BBT
* to the NAND you should call nanddev_bbt_update().
*
* Return: 0 in case of success or -%ERANGE if @entry is bigger than the BBT
* size.
*/
int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
enum nand_bbt_block_status status)
{
unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
unsigned long *pos = nand->bbt.cache +
((entry * bits_per_block) / BITS_PER_LONG);
unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
unsigned long val = status & GENMASK(bits_per_block - 1, 0);
if (entry >= nanddev_neraseblocks(nand))
return -ERANGE;
pos[0] &= ~GENMASK(offs + bits_per_block - 1, offs);
pos[0] |= val << offs;
if (bits_per_block + offs > BITS_PER_LONG) {
unsigned int rbits = bits_per_block + offs - BITS_PER_LONG;
pos[1] &= ~GENMASK(rbits - 1, 0);
pos[1] |= val >> (bits_per_block - rbits);
}
return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_set_block_status);
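/*
 * Minimal usage sketch (hypothetical entry number): mark eraseblock 12
 * as worn and push the change once on-flash tables are supported:
 *
 *	nanddev_bbt_set_block_status(nand, 12, NAND_BBT_BLOCK_WORN);
 *	nanddev_bbt_update(nand);
 */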
| linux-master | drivers/mtd/nand/bbt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Bad Block Table support for the OneNAND driver
*
* Copyright(c) 2005 Samsung Electronics
* Kyungmin Park <[email protected]>
*
* Derived from nand_bbt.c
*
* TODO:
* Split BBT core and chip specific BBT.
*/
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/export.h>
/**
* check_short_pattern - [GENERIC] check if a pattern is in the buffer
* @buf: the buffer to search
* @len: the length of buffer to search
* @paglen: the pagelength
* @td: search pattern descriptor
*
* Check for a pattern at the given place. Used to search bad block
* tables and good / bad block identifiers. Same as check_pattern, but
* no optional empty check and the pattern is expected to start
* at offset 0.
*
*/
static int check_short_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
{
int i;
uint8_t *p = buf;
/* Compare the pattern */
for (i = 0; i < td->len; i++) {
if (p[i] != td->pattern[i])
return -1;
}
return 0;
}
/**
* create_bbt - [GENERIC] Create a bad block table by scanning the device
* @mtd: MTD device structure
* @buf: temporary buffer
* @bd: descriptor for the good/bad block search pattern
* @chip: create the table for a specific chip, -1 read all chips.
* Applies only if NAND_BBT_PERCHIP option is set
*
* Create a bad block table by scanning the device
* for the given good/bad block identify pattern
*/
static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd, int chip)
{
struct onenand_chip *this = mtd->priv;
struct bbm_info *bbm = this->bbm;
int i, j, numblocks, len, scanlen;
int startblock;
loff_t from;
size_t readlen;
struct mtd_oob_ops ops = { };
int rgn;
printk(KERN_INFO "Scanning device for bad blocks\n");
len = 2;
/* We only need to read a few bytes from the OOB area */
scanlen = 0;
readlen = bd->len;
/* chip == -1 case only */
/* Note that numblocks is 2 * (real numblocks) here;
* see i += 2 below as it makes shifting and masking less painful
*/
numblocks = this->chipsize >> (bbm->bbt_erase_shift - 1);
startblock = 0;
from = 0;
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooblen = readlen;
ops.oobbuf = buf;
ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
for (i = startblock; i < numblocks; ) {
int ret;
for (j = 0; j < len; j++) {
/* No need to read pages fully,
* just read required OOB bytes */
ret = onenand_bbt_read_oob(mtd,
from + j * this->writesize + bd->offs, &ops);
/* If it is an initial bad block, just ignore it */
if (ret == ONENAND_BBT_READ_FATAL_ERROR)
return -EIO;
if (ret || check_short_pattern(&buf[j * scanlen],
scanlen, this->writesize, bd)) {
bbm->bbt[i >> 3] |= 0x03 << (i & 0x6);
printk(KERN_INFO "OneNAND eraseblock %d is an "
"initial bad block\n", i >> 1);
mtd->ecc_stats.badblocks++;
break;
}
}
i += 2;
if (FLEXONENAND(this)) {
rgn = flexonenand_region(mtd, from);
from += mtd->eraseregions[rgn].erasesize;
} else
from += (1 << bbm->bbt_erase_shift);
}
return 0;
}
/**
* onenand_memory_bbt - [GENERIC] create a memory based bad block table
* @mtd: MTD device structure
* @bd: descriptor for the good/bad block search pattern
*
* The function creates a memory based bbt by scanning the device
* for manufacturer / software marked good / bad blocks
*/
static inline int onenand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
struct onenand_chip *this = mtd->priv;
return create_bbt(mtd, this->page_buf, bd, -1);
}
/**
* onenand_isbad_bbt - [OneNAND Interface] Check if a block is bad
* @mtd: MTD device structure
* @offs: offset in the device
* @allowbbt: allow access to bad block table region
*/
static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
{
struct onenand_chip *this = mtd->priv;
struct bbm_info *bbm = this->bbm;
int block;
uint8_t res;
/* Get block number * 2 */
block = (int) (onenand_block(this, offs) << 1);
res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
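/*
 * Packing refresher: the BBT stores 2 bits per block, four blocks per
 * byte, and 'block' was doubled above. E.g. real block 5 gives
 * block = 10, so its status sits in bits 2-3 of bbt[1].
 */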
pr_debug("onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
(unsigned int) offs, block >> 1, res);
switch ((int) res) {
case 0x00: return 0;
case 0x01: return 1;
case 0x02: return allowbbt ? 0 : 1;
}
return 1;
}
/**
* onenand_scan_bbt - [OneNAND Interface] scan, find, read and maybe create bad block table(s)
* @mtd: MTD device structure
* @bd: descriptor for the good/bad block search pattern
*
* The function checks, if a bad block table(s) is/are already
* available. If not it scans the device for manufacturer
* marked good / bad blocks and writes the bad block table(s) to
* the selected place.
*
* The bad block table memory is allocated here. It is freed
* by the onenand_release function.
*
*/
static int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
struct onenand_chip *this = mtd->priv;
struct bbm_info *bbm = this->bbm;
int len, ret = 0;
len = this->chipsize >> (this->erase_shift + 2);
/* Allocate memory (2 bits per block) and clear the memory bad block table */
bbm->bbt = kzalloc(len, GFP_KERNEL);
if (!bbm->bbt)
return -ENOMEM;
/* Set erase shift */
bbm->bbt_erase_shift = this->erase_shift;
if (!bbm->isbad_bbt)
bbm->isbad_bbt = onenand_isbad_bbt;
/* Scan the device to build a memory based bad block table */
if ((ret = onenand_memory_bbt(mtd, bd))) {
printk(KERN_ERR "onenand_scan_bbt: Can't scan flash and build the RAM-based BBT\n");
kfree(bbm->bbt);
bbm->bbt = NULL;
}
return ret;
}
/*
* Define some generic bad / good block scan pattern which are used
* while scanning a device for factory marked good / bad blocks.
*/
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
static struct nand_bbt_descr largepage_memorybased = {
.options = 0,
.offs = 0,
.len = 2,
.pattern = scan_ff_pattern,
};
/**
* onenand_default_bbt - [OneNAND Interface] Select a default bad block table for the device
* @mtd: MTD device structure
*
* This function selects the default bad block table
* support for the device and calls the onenand_scan_bbt function
*/
int onenand_default_bbt(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
struct bbm_info *bbm;
this->bbm = kzalloc(sizeof(struct bbm_info), GFP_KERNEL);
if (!this->bbm)
return -ENOMEM;
bbm = this->bbm;
/* 1KB page has same configuration as 2KB page */
if (!bbm->badblock_pattern)
bbm->badblock_pattern = &largepage_memorybased;
return onenand_scan_bbt(mtd, bbm->badblock_pattern);
}
| linux-master | drivers/mtd/nand/onenand/onenand_bbt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Samsung S3C64XX/S5PC1XX OneNAND driver
*
* Copyright © 2008-2010 Samsung Electronics
* Kyungmin Park <[email protected]>
* Marek Szyprowski <[email protected]>
*
* Implementation:
* S3C64XX: emulate the pseudo BufferRAM
* S5PC110: use DMA
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include "samsung.h"
enum soc_type {
TYPE_S3C6400,
TYPE_S3C6410,
TYPE_S5PC110,
};
#define ONENAND_ERASE_STATUS 0x00
#define ONENAND_MULTI_ERASE_SET 0x01
#define ONENAND_ERASE_START 0x03
#define ONENAND_UNLOCK_START 0x08
#define ONENAND_UNLOCK_END 0x09
#define ONENAND_LOCK_START 0x0A
#define ONENAND_LOCK_END 0x0B
#define ONENAND_LOCK_TIGHT_START 0x0C
#define ONENAND_LOCK_TIGHT_END 0x0D
#define ONENAND_UNLOCK_ALL 0x0E
#define ONENAND_OTP_ACCESS 0x12
#define ONENAND_SPARE_ACCESS_ONLY 0x13
#define ONENAND_MAIN_ACCESS_ONLY 0x14
#define ONENAND_ERASE_VERIFY 0x15
#define ONENAND_MAIN_SPARE_ACCESS 0x16
#define ONENAND_PIPELINE_READ 0x4000
#define MAP_00 (0x0)
#define MAP_01 (0x1)
#define MAP_10 (0x2)
#define MAP_11 (0x3)
#define S3C64XX_CMD_MAP_SHIFT 24
#define S3C6400_FBA_SHIFT 10
#define S3C6400_FPA_SHIFT 4
#define S3C6400_FSA_SHIFT 2
#define S3C6410_FBA_SHIFT 12
#define S3C6410_FPA_SHIFT 6
#define S3C6410_FSA_SHIFT 4
/* S5PC110 specific definitions */
#define S5PC110_DMA_SRC_ADDR 0x400
#define S5PC110_DMA_SRC_CFG 0x404
#define S5PC110_DMA_DST_ADDR 0x408
#define S5PC110_DMA_DST_CFG 0x40C
#define S5PC110_DMA_TRANS_SIZE 0x414
#define S5PC110_DMA_TRANS_CMD 0x418
#define S5PC110_DMA_TRANS_STATUS 0x41C
#define S5PC110_DMA_TRANS_DIR 0x420
#define S5PC110_INTC_DMA_CLR 0x1004
#define S5PC110_INTC_ONENAND_CLR 0x1008
#define S5PC110_INTC_DMA_MASK 0x1024
#define S5PC110_INTC_ONENAND_MASK 0x1028
#define S5PC110_INTC_DMA_PEND 0x1044
#define S5PC110_INTC_ONENAND_PEND 0x1048
#define S5PC110_INTC_DMA_STATUS 0x1064
#define S5PC110_INTC_ONENAND_STATUS 0x1068
#define S5PC110_INTC_DMA_TD (1 << 24)
#define S5PC110_INTC_DMA_TE (1 << 16)
#define S5PC110_DMA_CFG_SINGLE (0x0 << 16)
#define S5PC110_DMA_CFG_4BURST (0x2 << 16)
#define S5PC110_DMA_CFG_8BURST (0x3 << 16)
#define S5PC110_DMA_CFG_16BURST (0x4 << 16)
#define S5PC110_DMA_CFG_INC (0x0 << 8)
#define S5PC110_DMA_CFG_CNT (0x1 << 8)
#define S5PC110_DMA_CFG_8BIT (0x0 << 0)
#define S5PC110_DMA_CFG_16BIT (0x1 << 0)
#define S5PC110_DMA_CFG_32BIT (0x2 << 0)
#define S5PC110_DMA_SRC_CFG_READ (S5PC110_DMA_CFG_16BURST | \
S5PC110_DMA_CFG_INC | \
S5PC110_DMA_CFG_16BIT)
#define S5PC110_DMA_DST_CFG_READ (S5PC110_DMA_CFG_16BURST | \
S5PC110_DMA_CFG_INC | \
S5PC110_DMA_CFG_32BIT)
#define S5PC110_DMA_SRC_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \
S5PC110_DMA_CFG_INC | \
S5PC110_DMA_CFG_32BIT)
#define S5PC110_DMA_DST_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \
S5PC110_DMA_CFG_INC | \
S5PC110_DMA_CFG_16BIT)
#define S5PC110_DMA_TRANS_CMD_TDC (0x1 << 18)
#define S5PC110_DMA_TRANS_CMD_TEC (0x1 << 16)
#define S5PC110_DMA_TRANS_CMD_TR (0x1 << 0)
#define S5PC110_DMA_TRANS_STATUS_TD (0x1 << 18)
#define S5PC110_DMA_TRANS_STATUS_TB (0x1 << 17)
#define S5PC110_DMA_TRANS_STATUS_TE (0x1 << 16)
#define S5PC110_DMA_DIR_READ 0x0
#define S5PC110_DMA_DIR_WRITE 0x1
struct s3c_onenand {
struct mtd_info *mtd;
struct platform_device *pdev;
enum soc_type type;
void __iomem *base;
void __iomem *ahb_addr;
int bootram_command;
void *page_buf;
void *oob_buf;
unsigned int (*mem_addr)(int fba, int fpa, int fsa);
unsigned int (*cmd_map)(unsigned int type, unsigned int val);
void __iomem *dma_addr;
unsigned long phys_base;
struct completion complete;
};
#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1)))
#define CMD_MAP_01(dev, mem_addr) (dev->cmd_map(MAP_01, (mem_addr)))
#define CMD_MAP_10(dev, mem_addr) (dev->cmd_map(MAP_10, (mem_addr)))
#define CMD_MAP_11(dev, addr) (dev->cmd_map(MAP_11, ((addr) << 2)))
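/*
 * Address-composition example (S3C6410 shifts, hypothetical values):
 * fba = 3, fpa = 5, fsa = 0 gives mem_addr = (3 << 12) | (5 << 6) =
 * 0x3140, and CMD_MAP_01(dev, 0x3140) then prepends MAP_01 in bits
 * 24+ to select the data-path window of the AHB region.
 */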
static struct s3c_onenand *onenand;
static inline int s3c_read_reg(int offset)
{
return readl(onenand->base + offset);
}
static inline void s3c_write_reg(int value, int offset)
{
writel(value, onenand->base + offset);
}
static inline int s3c_read_cmd(unsigned int cmd)
{
return readl(onenand->ahb_addr + cmd);
}
static inline void s3c_write_cmd(int value, unsigned int cmd)
{
writel(value, onenand->ahb_addr + cmd);
}
#ifdef SAMSUNG_DEBUG
static void s3c_dump_reg(void)
{
int i;
for (i = 0; i < 0x400; i += 0x40) {
printk(KERN_INFO "0x%08X: 0x%08x 0x%08x 0x%08x 0x%08x\n",
(unsigned int) onenand->base + i,
s3c_read_reg(i), s3c_read_reg(i + 0x10),
s3c_read_reg(i + 0x20), s3c_read_reg(i + 0x30));
}
}
#endif
static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val)
{
return (type << S3C64XX_CMD_MAP_SHIFT) | val;
}
static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa)
{
return (fba << S3C6400_FBA_SHIFT) | (fpa << S3C6400_FPA_SHIFT) |
(fsa << S3C6400_FSA_SHIFT);
}
static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa)
{
return (fba << S3C6410_FBA_SHIFT) | (fpa << S3C6410_FPA_SHIFT) |
(fsa << S3C6410_FSA_SHIFT);
}
static void s3c_onenand_reset(void)
{
unsigned long timeout = 0x10000;
int stat;
s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
while (timeout--) {
stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
if (stat & RST_CMP)
break;
}
stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
/* Clear interrupt */
s3c_write_reg(0x0, INT_ERR_ACK_OFFSET);
/* Clear the ECC status */
s3c_write_reg(0x0, ECC_ERR_STAT_OFFSET);
}
static unsigned short s3c_onenand_readw(void __iomem *addr)
{
struct onenand_chip *this = onenand->mtd->priv;
struct device *dev = &onenand->pdev->dev;
int reg = addr - this->base;
int word_addr = reg >> 1;
int value;
/* Used at probe time */
switch (reg) {
case ONENAND_REG_MANUFACTURER_ID:
return s3c_read_reg(MANUFACT_ID_OFFSET);
case ONENAND_REG_DEVICE_ID:
return s3c_read_reg(DEVICE_ID_OFFSET);
case ONENAND_REG_VERSION_ID:
return s3c_read_reg(FLASH_VER_ID_OFFSET);
case ONENAND_REG_DATA_BUFFER_SIZE:
return s3c_read_reg(DATA_BUF_SIZE_OFFSET);
case ONENAND_REG_TECHNOLOGY:
return s3c_read_reg(TECH_OFFSET);
case ONENAND_REG_SYS_CFG1:
return s3c_read_reg(MEM_CFG_OFFSET);
/* Used at unlock all status */
case ONENAND_REG_CTRL_STATUS:
return 0;
case ONENAND_REG_WP_STATUS:
return ONENAND_WP_US;
default:
break;
}
/* BootRAM access control */
if ((unsigned long)addr < ONENAND_DATARAM && onenand->bootram_command) {
if (word_addr == 0)
return s3c_read_reg(MANUFACT_ID_OFFSET);
if (word_addr == 1)
return s3c_read_reg(DEVICE_ID_OFFSET);
if (word_addr == 2)
return s3c_read_reg(FLASH_VER_ID_OFFSET);
}
value = s3c_read_cmd(CMD_MAP_11(onenand, word_addr)) & 0xffff;
dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
word_addr, value);
return value;
}
static void s3c_onenand_writew(unsigned short value, void __iomem *addr)
{
struct onenand_chip *this = onenand->mtd->priv;
struct device *dev = &onenand->pdev->dev;
unsigned int reg = addr - this->base;
unsigned int word_addr = reg >> 1;
/* Used at probe time */
switch (reg) {
case ONENAND_REG_SYS_CFG1:
s3c_write_reg(value, MEM_CFG_OFFSET);
return;
case ONENAND_REG_START_ADDRESS1:
case ONENAND_REG_START_ADDRESS2:
return;
/* Lock/lock-tight/unlock/unlock_all */
case ONENAND_REG_START_BLOCK_ADDRESS:
return;
default:
break;
}
/* BootRAM access control */
if ((unsigned long)addr < ONENAND_DATARAM) {
if (value == ONENAND_CMD_READID) {
onenand->bootram_command = 1;
return;
}
if (value == ONENAND_CMD_RESET) {
s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
onenand->bootram_command = 0;
return;
}
}
dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
word_addr, value);
s3c_write_cmd(value, CMD_MAP_11(onenand, word_addr));
}
static int s3c_onenand_wait(struct mtd_info *mtd, int state)
{
struct device *dev = &onenand->pdev->dev;
unsigned int flags = INT_ACT;
unsigned int stat, ecc;
unsigned long timeout;
switch (state) {
case FL_READING:
flags |= BLK_RW_CMP | LOAD_CMP;
break;
case FL_WRITING:
flags |= BLK_RW_CMP | PGM_CMP;
break;
case FL_ERASING:
flags |= BLK_RW_CMP | ERS_CMP;
break;
case FL_LOCKING:
flags |= BLK_RW_CMP;
break;
default:
break;
}
/* A 20 msec timeout is enough */
timeout = jiffies + msecs_to_jiffies(20);
while (time_before(jiffies, timeout)) {
stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
if (stat & flags)
break;
if (state != FL_READING)
cond_resched();
}
/* To get correct interrupt status in timeout case */
stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
/*
* The spec says to check the controller status first.
* However, to get correct information in the power-off
* recovery (POR) case, the ECC status should be read first.
*/
if (stat & LOAD_CMP) {
ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
dev_info(dev, "%s: ECC error = 0x%04x\n", __func__,
ecc);
mtd->ecc_stats.failed++;
return -EBADMSG;
}
}
if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) {
dev_info(dev, "%s: controller error = 0x%04x\n", __func__,
stat);
if (stat & LOCKED_BLK)
dev_info(dev, "%s: it's locked error = 0x%04x\n",
__func__, stat);
return -EIO;
}
return 0;
}
static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
size_t len)
{
struct onenand_chip *this = mtd->priv;
unsigned int *m, *s;
int fba, fpa, fsa = 0;
unsigned int mem_addr, cmd_map_01, cmd_map_10;
int i, mcount, scount;
int index;
fba = (int) (addr >> this->erase_shift);
fpa = (int) (addr >> this->page_shift);
fpa &= this->page_mask;
mem_addr = onenand->mem_addr(fba, fpa, fsa);
cmd_map_01 = CMD_MAP_01(onenand, mem_addr);
cmd_map_10 = CMD_MAP_10(onenand, mem_addr);
switch (cmd) {
case ONENAND_CMD_READ:
case ONENAND_CMD_READOOB:
case ONENAND_CMD_BUFFERRAM:
ONENAND_SET_NEXT_BUFFERRAM(this);
break;
default:
break;
}
index = ONENAND_CURRENT_BUFFERRAM(this);
/*
* Emulate Two BufferRAMs and access with 4 bytes pointer
*/
m = onenand->page_buf;
s = onenand->oob_buf;
if (index) {
m += (this->writesize >> 2);
s += (mtd->oobsize >> 2);
}
mcount = mtd->writesize >> 2;
scount = mtd->oobsize >> 2;
switch (cmd) {
case ONENAND_CMD_READ:
/* Main */
for (i = 0; i < mcount; i++)
*m++ = s3c_read_cmd(cmd_map_01);
return 0;
case ONENAND_CMD_READOOB:
s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
/* Main */
for (i = 0; i < mcount; i++)
*m++ = s3c_read_cmd(cmd_map_01);
/* Spare */
for (i = 0; i < scount; i++)
*s++ = s3c_read_cmd(cmd_map_01);
s3c_write_reg(0, TRANS_SPARE_OFFSET);
return 0;
case ONENAND_CMD_PROG:
/* Main */
for (i = 0; i < mcount; i++)
s3c_write_cmd(*m++, cmd_map_01);
return 0;
case ONENAND_CMD_PROGOOB:
s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
/* Main - dummy write */
for (i = 0; i < mcount; i++)
s3c_write_cmd(0xffffffff, cmd_map_01);
/* Spare */
for (i = 0; i < scount; i++)
s3c_write_cmd(*s++, cmd_map_01);
s3c_write_reg(0, TRANS_SPARE_OFFSET);
return 0;
case ONENAND_CMD_UNLOCK_ALL:
s3c_write_cmd(ONENAND_UNLOCK_ALL, cmd_map_10);
return 0;
case ONENAND_CMD_ERASE:
s3c_write_cmd(ONENAND_ERASE_START, cmd_map_10);
return 0;
default:
break;
}
return 0;
}
static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
{
struct onenand_chip *this = mtd->priv;
int index = ONENAND_CURRENT_BUFFERRAM(this);
unsigned char *p;
if (area == ONENAND_DATARAM) {
p = onenand->page_buf;
if (index == 1)
p += this->writesize;
} else {
p = onenand->oob_buf;
if (index == 1)
p += mtd->oobsize;
}
return p;
}
static int onenand_read_bufferram(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset,
size_t count)
{
unsigned char *p;
p = s3c_get_bufferram(mtd, area);
memcpy(buffer, p + offset, count);
return 0;
}
static int onenand_write_bufferram(struct mtd_info *mtd, int area,
const unsigned char *buffer, int offset,
size_t count)
{
unsigned char *p;
p = s3c_get_bufferram(mtd, area);
memcpy(p + offset, buffer, count);
return 0;
}
static int (*s5pc110_dma_ops)(dma_addr_t dst, dma_addr_t src, size_t count, int direction);
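/*
 * Polling is the default; probe() switches this to the IRQ-driven
 * variant when an IRQ resource is present.
 */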
static int s5pc110_dma_poll(dma_addr_t dst, dma_addr_t src, size_t count, int direction)
{
void __iomem *base = onenand->dma_addr;
int status;
unsigned long timeout;
writel(src, base + S5PC110_DMA_SRC_ADDR);
writel(dst, base + S5PC110_DMA_DST_ADDR);
if (direction == S5PC110_DMA_DIR_READ) {
writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
} else {
writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
}
writel(count, base + S5PC110_DMA_TRANS_SIZE);
writel(direction, base + S5PC110_DMA_TRANS_DIR);
writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);
/*
* The spec gives no exact timeout value.
* In practice a transfer takes under 1 msec,
* so 20 msecs are enough.
*/
timeout = jiffies + msecs_to_jiffies(20);
do {
status = readl(base + S5PC110_DMA_TRANS_STATUS);
if (status & S5PC110_DMA_TRANS_STATUS_TE) {
writel(S5PC110_DMA_TRANS_CMD_TEC,
base + S5PC110_DMA_TRANS_CMD);
return -EIO;
}
} while (!(status & S5PC110_DMA_TRANS_STATUS_TD) &&
time_before(jiffies, timeout));
writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
return 0;
}
static irqreturn_t s5pc110_onenand_irq(int irq, void *data)
{
void __iomem *base = onenand->dma_addr;
int status, cmd = 0;
status = readl(base + S5PC110_INTC_DMA_STATUS);
if (likely(status & S5PC110_INTC_DMA_TD))
cmd = S5PC110_DMA_TRANS_CMD_TDC;
if (unlikely(status & S5PC110_INTC_DMA_TE))
cmd = S5PC110_DMA_TRANS_CMD_TEC;
writel(cmd, base + S5PC110_DMA_TRANS_CMD);
writel(status, base + S5PC110_INTC_DMA_CLR);
if (!onenand->complete.done)
complete(&onenand->complete);
return IRQ_HANDLED;
}
static int s5pc110_dma_irq(dma_addr_t dst, dma_addr_t src, size_t count, int direction)
{
void __iomem *base = onenand->dma_addr;
int status;
status = readl(base + S5PC110_INTC_DMA_MASK);
if (status) {
status &= ~(S5PC110_INTC_DMA_TD | S5PC110_INTC_DMA_TE);
writel(status, base + S5PC110_INTC_DMA_MASK);
}
writel(src, base + S5PC110_DMA_SRC_ADDR);
writel(dst, base + S5PC110_DMA_DST_ADDR);
if (direction == S5PC110_DMA_DIR_READ) {
writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
} else {
writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
}
writel(count, base + S5PC110_DMA_TRANS_SIZE);
writel(direction, base + S5PC110_DMA_TRANS_DIR);
writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);
wait_for_completion_timeout(&onenand->complete, msecs_to_jiffies(20));
return 0;
}
static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset, size_t count)
{
struct onenand_chip *this = mtd->priv;
void __iomem *p;
void *buf = (void *) buffer;
dma_addr_t dma_src, dma_dst;
int err, ofs, page_dma = 0;
struct device *dev = &onenand->pdev->dev;
p = this->base + area;
if (ONENAND_CURRENT_BUFFERRAM(this)) {
if (area == ONENAND_DATARAM)
p += this->writesize;
else
p += mtd->oobsize;
}
if (offset & 3 || (size_t) buf & 3 ||
!onenand->dma_addr || count != mtd->writesize)
goto normal;
/* Handle vmalloc address */
if (buf >= high_memory) {
struct page *page;
if (((size_t) buf & PAGE_MASK) !=
((size_t) (buf + count - 1) & PAGE_MASK))
goto normal;
page = vmalloc_to_page(buf);
if (!page)
goto normal;
/* Page offset */
ofs = ((size_t) buf & ~PAGE_MASK);
page_dma = 1;
/* DMA routine */
dma_src = onenand->phys_base + (p - this->base);
dma_dst = dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE);
} else {
/* DMA routine */
dma_src = onenand->phys_base + (p - this->base);
dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
}
if (dma_mapping_error(dev, dma_dst)) {
dev_err(dev, "Couldn't map a %zu byte buffer for DMA\n", count);
goto normal;
}
err = s5pc110_dma_ops(dma_dst, dma_src,
count, S5PC110_DMA_DIR_READ);
if (page_dma)
dma_unmap_page(dev, dma_dst, count, DMA_FROM_DEVICE);
else
dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
if (!err)
return 0;
normal:
if (count != mtd->writesize) {
/* Copy the bufferram to memory to prevent unaligned access */
memcpy_fromio(this->page_buf, p, mtd->writesize);
memcpy(buffer, this->page_buf + offset, count);
} else {
memcpy_fromio(buffer, p, count);
}
return 0;
}
static int s5pc110_chip_probe(struct mtd_info *mtd)
{
/* Nothing chip-specific to probe yet; just return 0 */
return 0;
}
static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state)
{
unsigned int flags = INT_ACT | LOAD_CMP;
unsigned int stat;
unsigned long timeout;
/* A 20 msec timeout is enough */
timeout = jiffies + msecs_to_jiffies(20);
while (time_before(jiffies, timeout)) {
stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
if (stat & flags)
break;
}
/* To get correct interrupt status in timeout case */
stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
if (stat & LD_FAIL_ECC_ERR) {
s3c_onenand_reset();
return ONENAND_BBT_READ_ERROR;
}
if (stat & LOAD_CMP) {
int ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
s3c_onenand_reset();
return ONENAND_BBT_READ_ERROR;
}
}
return 0;
}
static void s3c_onenand_check_lock_status(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
struct device *dev = &onenand->pdev->dev;
unsigned int block, end;
end = this->chipsize >> this->erase_shift;
for (block = 0; block < end; block++) {
unsigned int mem_addr = onenand->mem_addr(block, 0, 0);
s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));
if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) {
dev_err(dev, "block %d is write-protected!\n", block);
s3c_write_reg(LOCKED_BLK, INT_ERR_ACK_OFFSET);
}
}
}
static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs,
size_t len, int cmd)
{
struct onenand_chip *this = mtd->priv;
int start, end, start_mem_addr, end_mem_addr;
start = ofs >> this->erase_shift;
start_mem_addr = onenand->mem_addr(start, 0, 0);
end = start + (len >> this->erase_shift) - 1;
end_mem_addr = onenand->mem_addr(end, 0, 0);
if (cmd == ONENAND_CMD_LOCK) {
s3c_write_cmd(ONENAND_LOCK_START, CMD_MAP_10(onenand,
start_mem_addr));
s3c_write_cmd(ONENAND_LOCK_END, CMD_MAP_10(onenand,
end_mem_addr));
} else {
s3c_write_cmd(ONENAND_UNLOCK_START, CMD_MAP_10(onenand,
start_mem_addr));
s3c_write_cmd(ONENAND_UNLOCK_END, CMD_MAP_10(onenand,
end_mem_addr));
}
this->wait(mtd, FL_LOCKING);
}
static void s3c_unlock_all(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
loff_t ofs = 0;
size_t len = this->chipsize;
if (this->options & ONENAND_HAS_UNLOCK_ALL) {
/* Write unlock command */
this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);
/* No need to check return value */
this->wait(mtd, FL_LOCKING);
/* Workaround for all block unlock in DDP */
if (!ONENAND_IS_DDP(this)) {
s3c_onenand_check_lock_status(mtd);
return;
}
/* All blocks on another chip */
ofs = this->chipsize >> 1;
len = this->chipsize >> 1;
}
s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
s3c_onenand_check_lock_status(mtd);
}
static void s3c_onenand_setup(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
onenand->mtd = mtd;
if (onenand->type == TYPE_S3C6400) {
onenand->mem_addr = s3c6400_mem_addr;
onenand->cmd_map = s3c64xx_cmd_map;
} else if (onenand->type == TYPE_S3C6410) {
onenand->mem_addr = s3c6410_mem_addr;
onenand->cmd_map = s3c64xx_cmd_map;
} else if (onenand->type == TYPE_S5PC110) {
/* Override bufferram I/O with the S5PC110 DMA-capable version; keep generic OneNAND functions otherwise */
this->read_bufferram = s5pc110_read_bufferram;
this->chip_probe = s5pc110_chip_probe;
return;
} else {
BUG();
}
this->read_word = s3c_onenand_readw;
this->write_word = s3c_onenand_writew;
this->wait = s3c_onenand_wait;
this->bbt_wait = s3c_onenand_bbt_wait;
this->unlock_all = s3c_unlock_all;
this->command = s3c_onenand_command;
this->read_bufferram = onenand_read_bufferram;
this->write_bufferram = onenand_write_bufferram;
}
static int s3c_onenand_probe(struct platform_device *pdev)
{
struct onenand_platform_data *pdata;
struct onenand_chip *this;
struct mtd_info *mtd;
struct resource *r;
int size, err;
pdata = dev_get_platdata(&pdev->dev);
/* No need to check pdata; the platform data is optional */
size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
mtd = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (!mtd)
return -ENOMEM;
onenand = devm_kzalloc(&pdev->dev, sizeof(struct s3c_onenand),
GFP_KERNEL);
if (!onenand)
return -ENOMEM;
this = (struct onenand_chip *) &mtd[1];
mtd->priv = this;
mtd->dev.parent = &pdev->dev;
onenand->pdev = pdev;
onenand->type = platform_get_device_id(pdev)->driver_data;
s3c_onenand_setup(mtd);
onenand->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(onenand->base))
return PTR_ERR(onenand->base);
onenand->phys_base = r->start;
/* Set onenand_chip also */
this->base = onenand->base;
/* Use runtime badblock check */
this->options |= ONENAND_SKIP_UNLOCK_CHECK;
if (onenand->type != TYPE_S5PC110) {
onenand->ahb_addr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(onenand->ahb_addr))
return PTR_ERR(onenand->ahb_addr);
/* Allocate 4KiB BufferRAM */
onenand->page_buf = devm_kzalloc(&pdev->dev, SZ_4K,
GFP_KERNEL);
if (!onenand->page_buf)
return -ENOMEM;
/* Allocate 128-byte SpareRAM */
onenand->oob_buf = devm_kzalloc(&pdev->dev, 128, GFP_KERNEL);
if (!onenand->oob_buf)
return -ENOMEM;
/* S3C doesn't handle subpage write */
mtd->subpage_sft = 0;
this->subpagesize = mtd->writesize;
} else { /* S5PC110 */
onenand->dma_addr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(onenand->dma_addr))
return PTR_ERR(onenand->dma_addr);
s5pc110_dma_ops = s5pc110_dma_poll;
/* Interrupt support */
r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (r) {
init_completion(&onenand->complete);
s5pc110_dma_ops = s5pc110_dma_irq;
err = devm_request_irq(&pdev->dev, r->start,
s5pc110_onenand_irq,
IRQF_SHARED, "onenand",
&onenand);
if (err) {
dev_err(&pdev->dev, "failed to get irq\n");
return err;
}
}
}
err = onenand_scan(mtd, 1);
if (err)
return err;
if (onenand->type != TYPE_S5PC110) {
/* S3C doesn't handle subpage write */
mtd->subpage_sft = 0;
this->subpagesize = mtd->writesize;
}
if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
err = mtd_device_register(mtd, pdata ? pdata->parts : NULL,
pdata ? pdata->nr_parts : 0);
if (err) {
dev_err(&pdev->dev, "failed to parse partitions and register the MTD device\n");
onenand_release(mtd);
return err;
}
platform_set_drvdata(pdev, mtd);
return 0;
}
static void s3c_onenand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
onenand_release(mtd);
}
static int s3c_pm_ops_suspend(struct device *dev)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct onenand_chip *this = mtd->priv;
this->wait(mtd, FL_PM_SUSPENDED);
return 0;
}
static int s3c_pm_ops_resume(struct device *dev)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct onenand_chip *this = mtd->priv;
this->unlock_all(mtd);
return 0;
}
static const struct dev_pm_ops s3c_pm_ops = {
.suspend = s3c_pm_ops_suspend,
.resume = s3c_pm_ops_resume,
};
static const struct platform_device_id s3c_onenand_driver_ids[] = {
{
.name = "s3c6400-onenand",
.driver_data = TYPE_S3C6400,
}, {
.name = "s3c6410-onenand",
.driver_data = TYPE_S3C6410,
}, {
.name = "s5pc110-onenand",
.driver_data = TYPE_S5PC110,
}, { },
};
MODULE_DEVICE_TABLE(platform, s3c_onenand_driver_ids);
static struct platform_driver s3c_onenand_driver = {
.driver = {
.name = "samsung-onenand",
.pm = &s3c_pm_ops,
},
.id_table = s3c_onenand_driver_ids,
.probe = s3c_onenand_probe,
.remove_new = s3c_onenand_remove,
};
module_platform_driver(s3c_onenand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyungmin Park <[email protected]>");
MODULE_DESCRIPTION("Samsung OneNAND controller support");
| linux-master | drivers/mtd/nand/onenand/onenand_samsung.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2005-2009 Samsung Electronics
* Copyright © 2007 Nokia Corporation
*
* Kyungmin Park <[email protected]>
*
* Credits:
* Adrian Hunter <[email protected]>:
* auto-placement support, read-while load support, various fixes
*
* Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
* Flex-OneNAND support
* Amul Kumar Saha <amul.saha at samsung.com>
* OTP support
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
/*
* Multiblock erase if number of blocks to erase is 2 or more.
* Maximum number of blocks for simultaneous erase is 64.
*/
#define MB_ERASE_MIN_BLK_COUNT 2
#define MB_ERASE_MAX_BLK_COUNT 64
/* Default Flex-OneNAND boundary and lock respectively */
static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 };
module_param_array(flex_bdry, int, NULL, 0400);
MODULE_PARM_DESC(flex_bdry, "SLC Boundary information for Flex-OneNAND"
"Syntax:flex_bdry=DIE_BDRY,LOCK,..."
"DIE_BDRY: SLC boundary of the die"
"LOCK: Locking information for SLC boundary"
" : 0->Set boundary in unlocked status"
" : 1->Set boundary in locked status");
/* Default OneNAND/Flex-OneNAND OTP options*/
static int otp;
module_param(otp, int, 0400);
MODULE_PARM_DESC(otp, "Corresponding behaviour of OneNAND in OTP"
"Syntax : otp=LOCK_TYPE"
"LOCK_TYPE : Keys issued, for specific OTP Lock type"
" : 0 -> Default (No Blocks Locked)"
" : 1 -> OTP Block lock"
" : 2 -> 1st Block lock"
" : 3 -> BOTH OTP Block and 1st Block lock");
/*
* flexonenand_oob_128 - OOB info for Flex-OneNAND with 4KB page
* For now, we expose only 64 out of 80 ECC bytes
*/
static int flexonenand_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section > 7)
return -ERANGE;
oobregion->offset = (section * 16) + 6;
oobregion->length = 10;
return 0;
}
static int flexonenand_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section > 7)
return -ERANGE;
oobregion->offset = (section * 16) + 2;
oobregion->length = 4;
return 0;
}
static const struct mtd_ooblayout_ops flexonenand_ooblayout_ops = {
.ecc = flexonenand_ooblayout_ecc,
.free = flexonenand_ooblayout_free,
};
/*
* onenand_oob_128 - oob info for OneNAND with 4KB page
*
* Based on specification:
* 4Gb M-die OneNAND Flash (KFM4G16Q4M, KFN8G16Q4M). Rev. 1.3, Apr. 2010
*
*/
static int onenand_ooblayout_128_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section > 7)
return -ERANGE;
oobregion->offset = (section * 16) + 7;
oobregion->length = 9;
return 0;
}
static int onenand_ooblayout_128_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section >= 8)
return -ERANGE;
/*
* The free bytes use the spare area fields marked as
* "Managed by internal ECC logic for Logical Sector Number area".
*/
oobregion->offset = (section * 16) + 2;
oobregion->length = 3;
return 0;
}
static const struct mtd_ooblayout_ops onenand_oob_128_ooblayout_ops = {
.ecc = onenand_ooblayout_128_ecc,
.free = onenand_ooblayout_128_free,
};
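/*
 * The same decoding for 4KiB-page OneNAND (illustrative, derived only from
 * the callbacks above): per 16-byte chunk, bytes 2..4 are free and bytes
 * 7..15 are ECC; the remaining bytes are not exposed.  For example,
 * section 2 yields the free region {offset 34, length 3} and the ECC
 * region {offset 39, length 9}.
 */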
/*
* onenand_oob_32_64 - oob info for large (2KB) page
*/
static int onenand_ooblayout_32_64_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section > 3)
return -ERANGE;
oobregion->offset = (section * 16) + 8;
oobregion->length = 5;
return 0;
}
static int onenand_ooblayout_32_64_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
int sections = (mtd->oobsize / 32) * 2;
if (section >= sections)
return -ERANGE;
if (section & 1) {
oobregion->offset = ((section - 1) * 16) + 14;
oobregion->length = 2;
} else {
oobregion->offset = (section * 16) + 2;
oobregion->length = 3;
}
return 0;
}
static const struct mtd_ooblayout_ops onenand_oob_32_64_ooblayout_ops = {
.ecc = onenand_ooblayout_32_64_ecc,
.free = onenand_ooblayout_32_64_free,
};
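/*
 * Worked example for the callbacks above, taking a 32-byte spare area
 * (illustrative): sections = (32 / 32) * 2 = 2, so the free regions are
 * {offset 2, length 3} (section 0) and {offset 14, length 2} (section 1),
 * while onenand_ooblayout_32_64_ecc() reports the ECC regions
 * {offset 8, length 5} and {offset 24, length 5}.
 */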
static const unsigned char ffchars[] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 16 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 32 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 48 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 64 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 80 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 96 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 112 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 128 */
};
/**
* onenand_readw - [OneNAND Interface] Read OneNAND register
* @addr: address to read
*
* Read OneNAND register
*/
static unsigned short onenand_readw(void __iomem *addr)
{
return readw(addr);
}
/**
* onenand_writew - [OneNAND Interface] Write OneNAND register with value
* @value: value to write
* @addr: address to write
*
* Write OneNAND register with value
*/
static void onenand_writew(unsigned short value, void __iomem *addr)
{
writew(value, addr);
}
/**
* onenand_block_address - [DEFAULT] Get block address
* @this: onenand chip data structure
* @block: the block
* @return translated block address if DDP, otherwise same
*
* Setup Start Address 1 Register (F100h)
*/
static int onenand_block_address(struct onenand_chip *this, int block)
{
/* Device Flash Core select, NAND Flash Block Address */
if (block & this->density_mask)
return ONENAND_DDP_CHIP1 | (block ^ this->density_mask);
return block;
}
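/*
 * Worked example (the density_mask value is made up for illustration):
 * on a DDP device with density_mask = 0x400 (1024 blocks per die),
 * block 1025 lives on the second die, so this returns
 * ONENAND_DDP_CHIP1 | (1025 ^ 0x400), i.e. DFS selects core 1 and the
 * FBA is block 1 within that core.  Blocks below 1024 pass through
 * unchanged.
 */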
/**
* onenand_bufferram_address - [DEFAULT] Get bufferram address
* @this: onenand chip data structure
* @block: the block
* @return set DBS value if DDP, otherwise 0
*
* Setup Start Address 2 Register (F101h) for DDP
*/
static int onenand_bufferram_address(struct onenand_chip *this, int block)
{
/* Device BufferRAM Select */
if (block & this->density_mask)
return ONENAND_DDP_CHIP1;
return ONENAND_DDP_CHIP0;
}
/**
* onenand_page_address - [DEFAULT] Get page address
* @page: the page address
* @sector: the sector address
* @return combined page and sector address
*
* Setup Start Address 8 Register (F107h)
*/
static int onenand_page_address(int page, int sector)
{
/* Flash Page Address, Flash Sector Address */
int fpa, fsa;
fpa = page & ONENAND_FPA_MASK;
fsa = sector & ONENAND_FSA_MASK;
return ((fpa << ONENAND_FPA_SHIFT) | fsa);
}
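/*
 * Worked example, assuming the usual field layout where FSA occupies the
 * low bits and FPA sits ONENAND_FPA_SHIFT bits above it:
 *
 *	onenand_page_address(5, 1) == (5 << ONENAND_FPA_SHIFT) | 1
 *
 * which is 0x15 when ONENAND_FPA_SHIFT is 2.
 */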
/**
* onenand_buffer_address - [DEFAULT] Get buffer address
* @dataram1: DataRAM index
* @sectors: the sector address
* @count: the number of sectors
* Return: the start buffer value
*
* Setup Start Buffer Register (F200h)
*/
static int onenand_buffer_address(int dataram1, int sectors, int count)
{
int bsa, bsc;
/* BufferRAM Sector Address */
bsa = sectors & ONENAND_BSA_MASK;
if (dataram1)
bsa |= ONENAND_BSA_DATARAM1; /* DataRAM1 */
else
bsa |= ONENAND_BSA_DATARAM0; /* DataRAM0 */
/* BufferRAM Sector Count */
bsc = count & ONENAND_BSC_MASK;
return ((bsa << ONENAND_BSA_SHIFT) | bsc);
}
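/*
 * Worked example (the field encodings come from ONENAND_BSA_* and
 * ONENAND_BSC_* in onenand_regs.h): a whole-page transfer into DataRAM0
 * uses onenand_buffer_address(0, 0, 0), i.e. bsa = ONENAND_BSA_DATARAM0
 * and bsc = 0, so the register value is
 * ONENAND_BSA_DATARAM0 << ONENAND_BSA_SHIFT.
 */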
/**
 * flexonenand_block - Return the block number for a given address
 * @this: OneNAND device structure
 * @addr: address for which the block number is needed
 */
static unsigned flexonenand_block(struct onenand_chip *this, loff_t addr)
{
unsigned boundary, blk, die = 0;
if (ONENAND_IS_DDP(this) && addr >= this->diesize[0]) {
die = 1;
addr -= this->diesize[0];
}
boundary = this->boundary[die];
blk = addr >> (this->erase_shift - 1);
if (blk > boundary)
blk = (blk + boundary + 1) >> 1;
blk += die ? this->density_mask : 0;
return blk;
}
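/*
 * Worked example (geometry made up for illustration): with
 * erase_shift = 18 (256KiB MLC blocks, so one SLC block is 128KiB) and
 * boundary[0] = 3 (blocks 0..3 are SLC), an address of 1MiB is 8 SLC
 * units; since 8 > 3, blk = (8 + 3 + 1) >> 1 = 6: four SLC blocks cover
 * the first 512KiB and two MLC blocks cover the next 512KiB.
 */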
inline unsigned onenand_block(struct onenand_chip *this, loff_t addr)
{
if (!FLEXONENAND(this))
return addr >> this->erase_shift;
return flexonenand_block(this, addr);
}
/**
* flexonenand_addr - Return address of the block
* @this: OneNAND device structure
* @block: Block number on Flex-OneNAND
*
* Return address of the block
*/
static loff_t flexonenand_addr(struct onenand_chip *this, int block)
{
loff_t ofs = 0;
int die = 0, boundary;
if (ONENAND_IS_DDP(this) && block >= this->density_mask) {
block -= this->density_mask;
die = 1;
ofs = this->diesize[0];
}
boundary = this->boundary[die];
ofs += (loff_t)block << (this->erase_shift - 1);
if (block > (boundary + 1))
ofs += (loff_t)(block - boundary - 1) << (this->erase_shift - 1);
return ofs;
}
loff_t onenand_addr(struct onenand_chip *this, int block)
{
if (!FLEXONENAND(this))
return (loff_t)block << this->erase_shift;
return flexonenand_addr(this, block);
}
EXPORT_SYMBOL(onenand_addr);
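/*
 * Round-trip check against the example above flexonenand_block() (same
 * made-up geometry): flexonenand_addr(this, 6) computes
 * ofs = (6 << 17) + ((6 - 3 - 1) << 17) = 8 << 17 = 1MiB, so the forward
 * and reverse translations agree.
 */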
/**
* onenand_get_density - [DEFAULT] Get OneNAND density
* @dev_id: OneNAND device ID
*
* Get OneNAND density from device ID
*/
static inline int onenand_get_density(int dev_id)
{
int density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT;
return (density & ONENAND_DEVICE_DENSITY_MASK);
}
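/*
 * Example (assuming the 4-bit density field and the
 * ONENAND_DEVICE_DENSITY_* encoding of onenand_regs.h): a device ID of
 * 0x0050 decodes to density 5, which that encoding names a 4Gb part.
 */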
/**
* flexonenand_region - [Flex-OneNAND] Return erase region of addr
* @mtd: MTD device structure
* @addr: address whose erase region needs to be identified
*/
int flexonenand_region(struct mtd_info *mtd, loff_t addr)
{
int i;
for (i = 0; i < mtd->numeraseregions; i++)
if (addr < mtd->eraseregions[i].offset)
break;
return i - 1;
}
EXPORT_SYMBOL(flexonenand_region);
/**
* onenand_command - [DEFAULT] Send command to OneNAND device
* @mtd: MTD device structure
* @cmd: the command to be sent
* @addr: offset to read from or write to
* @len: number of bytes to read or write
*
* Send command to OneNAND device. This function is used for middle/large page
 * devices (1KB/2KB per page).
*/
static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t len)
{
struct onenand_chip *this = mtd->priv;
int value, block, page;
/* Address translation */
switch (cmd) {
case ONENAND_CMD_UNLOCK:
case ONENAND_CMD_LOCK:
case ONENAND_CMD_LOCK_TIGHT:
case ONENAND_CMD_UNLOCK_ALL:
block = -1;
page = -1;
break;
case FLEXONENAND_CMD_PI_ACCESS:
/* addr contains die index */
block = addr * this->density_mask;
page = -1;
break;
case ONENAND_CMD_ERASE:
case ONENAND_CMD_MULTIBLOCK_ERASE:
case ONENAND_CMD_ERASE_VERIFY:
case ONENAND_CMD_BUFFERRAM:
case ONENAND_CMD_OTP_ACCESS:
block = onenand_block(this, addr);
page = -1;
break;
case FLEXONENAND_CMD_READ_PI:
cmd = ONENAND_CMD_READ;
block = addr * this->density_mask;
page = 0;
break;
default:
block = onenand_block(this, addr);
if (FLEXONENAND(this))
			page = (int) (addr - onenand_addr(this, block))
					>> this->page_shift;
else
page = (int) (addr >> this->page_shift);
if (ONENAND_IS_2PLANE(this)) {
/* Make the even block number */
block &= ~1;
/* Is it the odd plane? */
if (addr & this->writesize)
block++;
page >>= 1;
}
page &= this->page_mask;
break;
}
/* NOTE: The setting order of the registers is very important! */
if (cmd == ONENAND_CMD_BUFFERRAM) {
/* Select DataRAM for DDP */
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this))
/* It is always BufferRAM0 */
ONENAND_SET_BUFFERRAM0(this);
else
/* Switch to the next data buffer */
ONENAND_SET_NEXT_BUFFERRAM(this);
return 0;
}
if (block != -1) {
/* Write 'DFS, FBA' of Flash */
value = onenand_block_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
/* Select DataRAM for DDP */
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
}
if (page != -1) {
/* Now we use page size operation */
int sectors = 0, count = 0;
int dataram;
switch (cmd) {
case FLEXONENAND_CMD_RECOVER_LSB:
case ONENAND_CMD_READ:
case ONENAND_CMD_READOOB:
if (ONENAND_IS_4KB_PAGE(this))
/* It is always BufferRAM0 */
dataram = ONENAND_SET_BUFFERRAM0(this);
else
dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
break;
default:
if (ONENAND_IS_2PLANE(this) && cmd == ONENAND_CMD_PROG)
cmd = ONENAND_CMD_2X_PROG;
dataram = ONENAND_CURRENT_BUFFERRAM(this);
break;
}
/* Write 'FPA, FSA' of Flash */
value = onenand_page_address(page, sectors);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS8);
/* Write 'BSA, BSC' of DataRAM */
value = onenand_buffer_address(dataram, sectors, count);
this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
}
/* Interrupt clear */
this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT);
/* Write command */
this->write_word(cmd, this->base + ONENAND_REG_COMMAND);
return 0;
}
/**
* onenand_read_ecc - return ecc status
* @this: onenand chip structure
*/
static inline int onenand_read_ecc(struct onenand_chip *this)
{
int ecc, i, result = 0;
if (!FLEXONENAND(this) && !ONENAND_IS_4KB_PAGE(this))
return this->read_word(this->base + ONENAND_REG_ECC_STATUS);
for (i = 0; i < 4; i++) {
ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i*2);
if (likely(!ecc))
continue;
if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR)
return ONENAND_ECC_2BIT_ALL;
else
result = ONENAND_ECC_1BIT_ALL;
}
return result;
}
/**
* onenand_wait - [DEFAULT] wait until the command is done
* @mtd: MTD device structure
* @state: state to select the max. timeout value
*
 * Wait for command done. This applies to all OneNAND commands.
* Read can take up to 30us, erase up to 2ms and program up to 350us
* according to general OneNAND specs
*/
static int onenand_wait(struct mtd_info *mtd, int state)
{
struct onenand_chip * this = mtd->priv;
unsigned long timeout;
unsigned int flags = ONENAND_INT_MASTER;
unsigned int interrupt = 0;
unsigned int ctrl;
	/* 20 msec should be enough */
timeout = jiffies + msecs_to_jiffies(20);
while (time_before(jiffies, timeout)) {
interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
if (interrupt & flags)
break;
if (state != FL_READING && state != FL_PREPARING_ERASE)
cond_resched();
}
/* To get correct interrupt status in timeout case */
interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
	/*
	 * The spec says to check the controller status first, but to get
	 * correct information in the power-off recovery (POR) case, the
	 * ECC status must be read first.
	 */
if (interrupt & ONENAND_INT_READ) {
int ecc = onenand_read_ecc(this);
if (ecc) {
if (ecc & ONENAND_ECC_2BIT_ALL) {
printk(KERN_ERR "%s: ECC error = 0x%04x\n",
__func__, ecc);
mtd->ecc_stats.failed++;
return -EBADMSG;
} else if (ecc & ONENAND_ECC_1BIT_ALL) {
printk(KERN_DEBUG "%s: correctable ECC error = 0x%04x\n",
__func__, ecc);
mtd->ecc_stats.corrected++;
}
}
} else if (state == FL_READING) {
printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
__func__, ctrl, interrupt);
return -EIO;
}
if (state == FL_PREPARING_ERASE && !(interrupt & ONENAND_INT_ERASE)) {
printk(KERN_ERR "%s: mb erase timeout! ctrl=0x%04x intr=0x%04x\n",
__func__, ctrl, interrupt);
return -EIO;
}
if (!(interrupt & ONENAND_INT_MASTER)) {
printk(KERN_ERR "%s: timeout! ctrl=0x%04x intr=0x%04x\n",
__func__, ctrl, interrupt);
return -EIO;
}
	/* If there's a controller error, it's a real error */
if (ctrl & ONENAND_CTRL_ERROR) {
printk(KERN_ERR "%s: controller error = 0x%04x\n",
__func__, ctrl);
if (ctrl & ONENAND_CTRL_LOCK)
printk(KERN_ERR "%s: it's locked error.\n", __func__);
return -EIO;
}
return 0;
}
/*
* onenand_interrupt - [DEFAULT] onenand interrupt handler
* @irq: onenand interrupt number
 * @data: interrupt data
*
* complete the work
*/
static irqreturn_t onenand_interrupt(int irq, void *data)
{
struct onenand_chip *this = data;
/* To handle shared interrupt */
if (!this->complete.done)
complete(&this->complete);
return IRQ_HANDLED;
}
/*
* onenand_interrupt_wait - [DEFAULT] wait until the command is done
* @mtd: MTD device structure
* @state: state to select the max. timeout value
*
* Wait for command done.
*/
static int onenand_interrupt_wait(struct mtd_info *mtd, int state)
{
struct onenand_chip *this = mtd->priv;
wait_for_completion(&this->complete);
return onenand_wait(mtd, state);
}
/*
* onenand_try_interrupt_wait - [DEFAULT] try interrupt wait
* @mtd: MTD device structure
* @state: state to select the max. timeout value
*
 * Try interrupt-based wait (used only once)
*/
static int onenand_try_interrupt_wait(struct mtd_info *mtd, int state)
{
struct onenand_chip *this = mtd->priv;
unsigned long remain, timeout;
/* We use interrupt wait first */
this->wait = onenand_interrupt_wait;
timeout = msecs_to_jiffies(100);
remain = wait_for_completion_timeout(&this->complete, timeout);
if (!remain) {
printk(KERN_INFO "OneNAND: There's no interrupt. "
"We use the normal wait\n");
/* Release the irq */
free_irq(this->irq, this);
this->wait = onenand_wait;
}
return onenand_wait(mtd, state);
}
/*
* onenand_setup_wait - [OneNAND Interface] setup onenand wait method
* @mtd: MTD device structure
*
 * There are two methods to wait for OneNAND work:
* 1. polling - read interrupt status register
* 2. interrupt - use the kernel interrupt method
*/
static void onenand_setup_wait(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
int syscfg;
init_completion(&this->complete);
if (this->irq <= 0) {
this->wait = onenand_wait;
return;
}
if (request_irq(this->irq, &onenand_interrupt,
IRQF_SHARED, "onenand", this)) {
/* If we can't get irq, use the normal wait */
this->wait = onenand_wait;
return;
}
/* Enable interrupt */
syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
syscfg |= ONENAND_SYS_CFG1_IOBE;
this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
this->wait = onenand_try_interrupt_wait;
}
/**
* onenand_bufferram_offset - [DEFAULT] BufferRAM offset
* @mtd: MTD data structure
* @area: BufferRAM area
 * @return BufferRAM offset for the given area
*
* Return BufferRAM offset given area
*/
static inline int onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
struct onenand_chip *this = mtd->priv;
if (ONENAND_CURRENT_BUFFERRAM(this)) {
/* Note: the 'this->writesize' is a real page size */
if (area == ONENAND_DATARAM)
return this->writesize;
if (area == ONENAND_SPARERAM)
return mtd->oobsize;
}
return 0;
}
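/*
 * Example (page/OOB sizes are illustrative): on a 2KiB-page device with a
 * 64-byte spare area, while BufferRAM1 is current the DataRAM contents sit
 * at this->base + ONENAND_DATARAM + 2048 and the SpareRAM contents at
 * this->base + ONENAND_SPARERAM + 64; with BufferRAM0 current both offsets
 * are 0.
 */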
/**
* onenand_read_bufferram - [OneNAND Interface] Read the bufferram area
* @mtd: MTD data structure
* @area: BufferRAM area
* @buffer: the databuffer to put/get data
* @offset: offset to read from or write to
* @count: number of bytes to read/write
*
* Read the BufferRAM area
*/
static int onenand_read_bufferram(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset, size_t count)
{
struct onenand_chip *this = mtd->priv;
void __iomem *bufferram;
bufferram = this->base + area;
bufferram += onenand_bufferram_offset(mtd, area);
if (ONENAND_CHECK_BYTE_ACCESS(count)) {
unsigned short word;
		/* Align with word (16-bit) size */
count--;
/* Read word and save byte */
word = this->read_word(bufferram + offset + count);
buffer[count] = (word & 0xff);
}
memcpy(buffer, bufferram + offset, count);
return 0;
}
/**
* onenand_sync_read_bufferram - [OneNAND Interface] Read the bufferram area with Sync. Burst mode
* @mtd: MTD data structure
* @area: BufferRAM area
* @buffer: the databuffer to put/get data
* @offset: offset to read from or write to
* @count: number of bytes to read/write
*
* Read the BufferRAM area with Sync. Burst Mode
*/
static int onenand_sync_read_bufferram(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset, size_t count)
{
struct onenand_chip *this = mtd->priv;
void __iomem *bufferram;
bufferram = this->base + area;
bufferram += onenand_bufferram_offset(mtd, area);
this->mmcontrol(mtd, ONENAND_SYS_CFG1_SYNC_READ);
if (ONENAND_CHECK_BYTE_ACCESS(count)) {
unsigned short word;
		/* Align with word (16-bit) size */
count--;
/* Read word and save byte */
word = this->read_word(bufferram + offset + count);
buffer[count] = (word & 0xff);
}
memcpy(buffer, bufferram + offset, count);
this->mmcontrol(mtd, 0);
return 0;
}
/**
* onenand_write_bufferram - [OneNAND Interface] Write the bufferram area
* @mtd: MTD data structure
* @area: BufferRAM area
* @buffer: the databuffer to put/get data
* @offset: offset to read from or write to
* @count: number of bytes to read/write
*
* Write the BufferRAM area
*/
static int onenand_write_bufferram(struct mtd_info *mtd, int area,
const unsigned char *buffer, int offset, size_t count)
{
struct onenand_chip *this = mtd->priv;
void __iomem *bufferram;
bufferram = this->base + area;
bufferram += onenand_bufferram_offset(mtd, area);
if (ONENAND_CHECK_BYTE_ACCESS(count)) {
unsigned short word;
int byte_offset;
		/* Align with word (16-bit) size */
count--;
/* Calculate byte access offset */
byte_offset = offset + count;
/* Read word and save byte */
word = this->read_word(bufferram + byte_offset);
word = (word & ~0xff) | buffer[count];
this->write_word(word, bufferram + byte_offset);
}
memcpy(bufferram + offset, buffer, count);
return 0;
}
/**
* onenand_get_2x_blockpage - [GENERIC] Get blockpage at 2x program mode
* @mtd: MTD data structure
* @addr: address to check
* @return blockpage address
*
* Get blockpage address at 2x program mode
*/
static int onenand_get_2x_blockpage(struct mtd_info *mtd, loff_t addr)
{
struct onenand_chip *this = mtd->priv;
int blockpage, block, page;
/* Calculate the even block number */
block = (int) (addr >> this->erase_shift) & ~1;
/* Is it the odd plane? */
if (addr & this->writesize)
block++;
page = (int) (addr >> (this->page_shift + 1)) & this->page_mask;
blockpage = (block << 7) | page;
return blockpage;
}
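/*
 * Worked example (shift values made up for illustration): with 2KiB pages
 * (page_shift = 11) and 128KiB blocks (erase_shift = 17), addr = 10KiB
 * gives block = (10KiB >> 17) & ~1 = 0; (addr & writesize) is non-zero,
 * so block becomes 1; page = (10KiB >> 12) & page_mask = 2; hence
 * blockpage = (1 << 7) | 2 = 130.
 */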
/**
* onenand_check_bufferram - [GENERIC] Check BufferRAM information
* @mtd: MTD data structure
* @addr: address to check
 * @return 1 if there is valid data, otherwise 0
 *
 * Check whether the BufferRAM holds the data we require
*/
static int onenand_check_bufferram(struct mtd_info *mtd, loff_t addr)
{
struct onenand_chip *this = mtd->priv;
int blockpage, found = 0;
unsigned int i;
if (ONENAND_IS_2PLANE(this))
blockpage = onenand_get_2x_blockpage(mtd, addr);
else
blockpage = (int) (addr >> this->page_shift);
/* Is there valid data? */
i = ONENAND_CURRENT_BUFFERRAM(this);
if (this->bufferram[i].blockpage == blockpage)
found = 1;
else {
/* Check another BufferRAM */
i = ONENAND_NEXT_BUFFERRAM(this);
if (this->bufferram[i].blockpage == blockpage) {
ONENAND_SET_NEXT_BUFFERRAM(this);
found = 1;
}
}
if (found && ONENAND_IS_DDP(this)) {
/* Select DataRAM for DDP */
int block = onenand_block(this, addr);
int value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
}
return found;
}
/**
* onenand_update_bufferram - [GENERIC] Update BufferRAM information
* @mtd: MTD data structure
* @addr: address to update
* @valid: valid flag
*
* Update BufferRAM information
*/
static void onenand_update_bufferram(struct mtd_info *mtd, loff_t addr,
int valid)
{
struct onenand_chip *this = mtd->priv;
int blockpage;
unsigned int i;
if (ONENAND_IS_2PLANE(this))
blockpage = onenand_get_2x_blockpage(mtd, addr);
else
blockpage = (int) (addr >> this->page_shift);
/* Invalidate another BufferRAM */
i = ONENAND_NEXT_BUFFERRAM(this);
if (this->bufferram[i].blockpage == blockpage)
this->bufferram[i].blockpage = -1;
/* Update BufferRAM */
i = ONENAND_CURRENT_BUFFERRAM(this);
if (valid)
this->bufferram[i].blockpage = blockpage;
else
this->bufferram[i].blockpage = -1;
}
/**
* onenand_invalidate_bufferram - [GENERIC] Invalidate BufferRAM information
* @mtd: MTD data structure
* @addr: start address to invalidate
* @len: length to invalidate
*
* Invalidate BufferRAM information
*/
static void onenand_invalidate_bufferram(struct mtd_info *mtd, loff_t addr,
unsigned int len)
{
struct onenand_chip *this = mtd->priv;
int i;
loff_t end_addr = addr + len;
/* Invalidate BufferRAM */
for (i = 0; i < MAX_BUFFERRAM; i++) {
loff_t buf_addr = this->bufferram[i].blockpage << this->page_shift;
if (buf_addr >= addr && buf_addr < end_addr)
this->bufferram[i].blockpage = -1;
}
}
/**
* onenand_get_device - [GENERIC] Get chip for selected access
* @mtd: MTD device structure
* @new_state: the state which is requested
*
* Get the device and lock it for exclusive access
*/
static int onenand_get_device(struct mtd_info *mtd, int new_state)
{
struct onenand_chip *this = mtd->priv;
DECLARE_WAITQUEUE(wait, current);
/*
* Grab the lock and see if the device is available
*/
while (1) {
spin_lock(&this->chip_lock);
if (this->state == FL_READY) {
this->state = new_state;
spin_unlock(&this->chip_lock);
if (new_state != FL_PM_SUSPENDED && this->enable)
this->enable(mtd);
break;
}
if (new_state == FL_PM_SUSPENDED) {
spin_unlock(&this->chip_lock);
return (this->state == FL_PM_SUSPENDED) ? 0 : -EAGAIN;
}
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&this->wq, &wait);
spin_unlock(&this->chip_lock);
schedule();
remove_wait_queue(&this->wq, &wait);
}
return 0;
}
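/*
 * Every exported operation in this file brackets its *_nolock worker with
 * this pair, e.g.:
 *
 *	onenand_get_device(mtd, FL_READING);
 *	ret = onenand_read_oob_nolock(mtd, from, ops);
 *	onenand_release_device(mtd);
 *
 * so readers, writers and erasers are serialized by the chip state machine.
 */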
/**
* onenand_release_device - [GENERIC] release chip
* @mtd: MTD device structure
*
* Deselect, release chip lock and wake up anyone waiting on the device
*/
static void onenand_release_device(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
if (this->state != FL_PM_SUSPENDED && this->disable)
this->disable(mtd);
/* Release the chip */
spin_lock(&this->chip_lock);
this->state = FL_READY;
wake_up(&this->wq);
spin_unlock(&this->chip_lock);
}
/**
* onenand_transfer_auto_oob - [INTERN] oob auto-placement transfer
* @mtd: MTD device structure
* @buf: destination address
* @column: oob offset to read from
* @thislen: oob length to read
*/
static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int column,
int thislen)
{
struct onenand_chip *this = mtd->priv;
this->read_bufferram(mtd, ONENAND_SPARERAM, this->oob_buf, 0,
mtd->oobsize);
return mtd_ooblayout_get_databytes(mtd, buf, this->oob_buf,
column, thislen);
}
/**
* onenand_recover_lsb - [Flex-OneNAND] Recover LSB page data
* @mtd: MTD device structure
* @addr: address to recover
* @status: return value from onenand_wait / onenand_bbt_wait
*
* MLC NAND Flash cell has paired pages - LSB page and MSB page. LSB page has
* lower page address and MSB page has higher page address in paired pages.
* If power off occurs during MSB page program, the paired LSB page data can
 * become corrupt. LSB page recovery read is a way to read the LSB page even
 * though its data are corrupted. When an uncorrectable error occurs as a
 * result of an LSB page read after power up, issue an LSB page recovery read.
*/
static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
{
struct onenand_chip *this = mtd->priv;
int i;
/* Recovery is only for Flex-OneNAND */
if (!FLEXONENAND(this))
return status;
/* check if we failed due to uncorrectable error */
if (!mtd_is_eccerr(status) && status != ONENAND_BBT_READ_ECC_ERROR)
return status;
/* check if address lies in MLC region */
i = flexonenand_region(mtd, addr);
if (mtd->eraseregions[i].erasesize < (1 << this->erase_shift))
return status;
/* We are attempting to reread, so decrement stats.failed
* which was incremented by onenand_wait due to read failure
*/
printk(KERN_INFO "%s: Attempting to recover from uncorrectable read\n",
__func__);
mtd->ecc_stats.failed--;
/* Issue the LSB page recovery command */
this->command(mtd, FLEXONENAND_CMD_RECOVER_LSB, addr, this->writesize);
return this->wait(mtd, FL_READING);
}
/**
* onenand_mlc_read_ops_nolock - MLC OneNAND read main and/or out-of-band
* @mtd: MTD device structure
* @from: offset to read from
* @ops: oob operation description structure
*
 * MLC OneNAND / Flex-OneNAND has a 4KB page size and 4KB DataRAM,
 * so read-while-load is not available.
*/
static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct onenand_chip *this = mtd->priv;
struct mtd_ecc_stats stats;
size_t len = ops->len;
size_t ooblen = ops->ooblen;
u_char *buf = ops->datbuf;
u_char *oobbuf = ops->oobbuf;
int read = 0, column, thislen;
int oobread = 0, oobcolumn, thisooblen, oobsize;
int ret = 0;
int writesize = this->writesize;
pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
(int)len);
oobsize = mtd_oobavail(mtd, ops);
oobcolumn = from & (mtd->oobsize - 1);
/* Do not allow reads past end of device */
if (from + len > mtd->size) {
printk(KERN_ERR "%s: Attempt read beyond end of device\n",
__func__);
ops->retlen = 0;
ops->oobretlen = 0;
return -EINVAL;
}
stats = mtd->ecc_stats;
while (read < len) {
cond_resched();
thislen = min_t(int, writesize, len - read);
column = from & (writesize - 1);
if (column + thislen > writesize)
thislen = writesize - column;
if (!onenand_check_bufferram(mtd, from)) {
this->command(mtd, ONENAND_CMD_READ, from, writesize);
ret = this->wait(mtd, FL_READING);
if (unlikely(ret))
ret = onenand_recover_lsb(mtd, from, ret);
onenand_update_bufferram(mtd, from, !ret);
if (mtd_is_eccerr(ret))
ret = 0;
if (ret)
break;
}
this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
if (oobbuf) {
thisooblen = oobsize - oobcolumn;
thisooblen = min_t(int, thisooblen, ooblen - oobread);
if (ops->mode == MTD_OPS_AUTO_OOB)
onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
else
this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
oobread += thisooblen;
oobbuf += thisooblen;
oobcolumn = 0;
}
read += thislen;
if (read == len)
break;
from += thislen;
buf += thislen;
}
	/*
	 * Return success if there were no ECC failures, else -EBADMSG.
	 * The fs driver will take care of that, because
	 * retlen == desired len and result == -EBADMSG
	 */
ops->retlen = read;
ops->oobretlen = oobread;
if (ret)
return ret;
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
/* return max bitflips per ecc step; ONENANDs correct 1 bit only */
return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
}
/**
* onenand_read_ops_nolock - [OneNAND Interface] OneNAND read main and/or out-of-band
* @mtd: MTD device structure
* @from: offset to read from
* @ops: oob operation description structure
*
* OneNAND read main and/or out-of-band data
*/
static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct onenand_chip *this = mtd->priv;
struct mtd_ecc_stats stats;
size_t len = ops->len;
size_t ooblen = ops->ooblen;
u_char *buf = ops->datbuf;
u_char *oobbuf = ops->oobbuf;
int read = 0, column, thislen;
int oobread = 0, oobcolumn, thisooblen, oobsize;
int ret = 0, boundary = 0;
int writesize = this->writesize;
pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
(int)len);
oobsize = mtd_oobavail(mtd, ops);
oobcolumn = from & (mtd->oobsize - 1);
/* Do not allow reads past end of device */
if ((from + len) > mtd->size) {
printk(KERN_ERR "%s: Attempt read beyond end of device\n",
__func__);
ops->retlen = 0;
ops->oobretlen = 0;
return -EINVAL;
}
stats = mtd->ecc_stats;
/* Read-while-load method */
/* Do first load to bufferRAM */
if (read < len) {
if (!onenand_check_bufferram(mtd, from)) {
this->command(mtd, ONENAND_CMD_READ, from, writesize);
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
if (mtd_is_eccerr(ret))
ret = 0;
}
}
thislen = min_t(int, writesize, len - read);
column = from & (writesize - 1);
if (column + thislen > writesize)
thislen = writesize - column;
while (!ret) {
/* If there is more to load then start next load */
from += thislen;
if (read + thislen < len) {
this->command(mtd, ONENAND_CMD_READ, from, writesize);
			/*
			 * Chip boundary handling in DDP:
			 * we have just issued a chip 1 read and pointed at the
			 * chip 1 BufferRAM, so we have to point back at the
			 * chip 0 BufferRAM.
			 */
if (ONENAND_IS_DDP(this) &&
unlikely(from == (this->chipsize >> 1))) {
this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
boundary = 1;
} else
boundary = 0;
ONENAND_SET_PREV_BUFFERRAM(this);
}
/* While load is going, read from last bufferRAM */
this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
/* Read oob area if needed */
if (oobbuf) {
thisooblen = oobsize - oobcolumn;
thisooblen = min_t(int, thisooblen, ooblen - oobread);
if (ops->mode == MTD_OPS_AUTO_OOB)
onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
else
this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
oobread += thisooblen;
oobbuf += thisooblen;
oobcolumn = 0;
}
/* See if we are done */
read += thislen;
if (read == len)
break;
/* Set up for next read from bufferRAM */
if (unlikely(boundary))
this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
ONENAND_SET_NEXT_BUFFERRAM(this);
buf += thislen;
thislen = min_t(int, writesize, len - read);
column = 0;
cond_resched();
/* Now wait for load */
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
if (mtd_is_eccerr(ret))
ret = 0;
}
	/*
	 * Return success if there were no ECC failures, else -EBADMSG.
	 * The fs driver will take care of that, because
	 * retlen == desired len and result == -EBADMSG
	 */
ops->retlen = read;
ops->oobretlen = oobread;
if (ret)
return ret;
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
/* return max bitflips per ecc step; ONENANDs correct 1 bit only */
return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
}
/**
* onenand_read_oob_nolock - [MTD Interface] OneNAND read out-of-band
* @mtd: MTD device structure
* @from: offset to read from
* @ops: oob operation description structure
*
* OneNAND read out-of-band data from the spare area
*/
static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct onenand_chip *this = mtd->priv;
struct mtd_ecc_stats stats;
int read = 0, thislen, column, oobsize;
size_t len = ops->ooblen;
unsigned int mode = ops->mode;
u_char *buf = ops->oobbuf;
int ret = 0, readcmd;
from += ops->ooboffs;
pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
(int)len);
/* Initialize return length value */
ops->oobretlen = 0;
if (mode == MTD_OPS_AUTO_OOB)
oobsize = mtd->oobavail;
else
oobsize = mtd->oobsize;
column = from & (mtd->oobsize - 1);
if (unlikely(column >= oobsize)) {
printk(KERN_ERR "%s: Attempted to start read outside oob\n",
__func__);
return -EINVAL;
}
stats = mtd->ecc_stats;
readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
while (read < len) {
cond_resched();
thislen = oobsize - column;
thislen = min_t(int, thislen, len);
this->command(mtd, readcmd, from, mtd->oobsize);
onenand_update_bufferram(mtd, from, 0);
ret = this->wait(mtd, FL_READING);
if (unlikely(ret))
ret = onenand_recover_lsb(mtd, from, ret);
if (ret && !mtd_is_eccerr(ret)) {
printk(KERN_ERR "%s: read failed = 0x%x\n",
__func__, ret);
break;
}
if (mode == MTD_OPS_AUTO_OOB)
onenand_transfer_auto_oob(mtd, buf, column, thislen);
else
this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
read += thislen;
if (read == len)
break;
buf += thislen;
/* Read more? */
if (read < len) {
/* Page size */
from += mtd->writesize;
column = 0;
}
}
ops->oobretlen = read;
if (ret)
return ret;
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
return 0;
}
/**
* onenand_read_oob - [MTD Interface] Read main and/or out-of-band
* @mtd: MTD device structure
* @from: offset to read from
* @ops: oob operation description structure
*
* Read main and/or out-of-band
*/
static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct onenand_chip *this = mtd->priv;
struct mtd_ecc_stats old_stats;
int ret;
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
case MTD_OPS_AUTO_OOB:
break;
case MTD_OPS_RAW:
/* Not implemented yet */
default:
return -EINVAL;
}
onenand_get_device(mtd, FL_READING);
old_stats = mtd->ecc_stats;
if (ops->datbuf)
ret = ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, ops) :
onenand_read_ops_nolock(mtd, from, ops);
else
ret = onenand_read_oob_nolock(mtd, from, ops);
if (ops->stats) {
ops->stats->uncorrectable_errors +=
mtd->ecc_stats.failed - old_stats.failed;
ops->stats->corrected_bitflips +=
mtd->ecc_stats.corrected - old_stats.corrected;
}
onenand_release_device(mtd);
return ret;
}
/**
* onenand_bbt_wait - [DEFAULT] wait until the command is done
* @mtd: MTD device structure
* @state: state to select the max. timeout value
*
* Wait for command done.
*/
static int onenand_bbt_wait(struct mtd_info *mtd, int state)
{
struct onenand_chip *this = mtd->priv;
unsigned long timeout;
unsigned int interrupt, ctrl, ecc, addr1, addr8;
	/* 20 msec should be enough */
timeout = jiffies + msecs_to_jiffies(20);
while (time_before(jiffies, timeout)) {
interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
if (interrupt & ONENAND_INT_MASTER)
break;
}
/* To get correct interrupt status in timeout case */
interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
addr1 = this->read_word(this->base + ONENAND_REG_START_ADDRESS1);
addr8 = this->read_word(this->base + ONENAND_REG_START_ADDRESS8);
if (interrupt & ONENAND_INT_READ) {
ecc = onenand_read_ecc(this);
if (ecc & ONENAND_ECC_2BIT_ALL) {
printk(KERN_DEBUG "%s: ecc 0x%04x ctrl 0x%04x "
"intr 0x%04x addr1 %#x addr8 %#x\n",
__func__, ecc, ctrl, interrupt, addr1, addr8);
return ONENAND_BBT_READ_ECC_ERROR;
}
} else {
printk(KERN_ERR "%s: read timeout! ctrl 0x%04x "
"intr 0x%04x addr1 %#x addr8 %#x\n",
__func__, ctrl, interrupt, addr1, addr8);
return ONENAND_BBT_READ_FATAL_ERROR;
}
/* Initial bad block case: 0x2400 or 0x0400 */
if (ctrl & ONENAND_CTRL_ERROR) {
printk(KERN_DEBUG "%s: ctrl 0x%04x intr 0x%04x addr1 %#x "
"addr8 %#x\n", __func__, ctrl, interrupt, addr1, addr8);
return ONENAND_BBT_READ_ERROR;
}
return 0;
}
/**
* onenand_bbt_read_oob - [MTD Interface] OneNAND read out-of-band for bbt scan
* @mtd: MTD device structure
* @from: offset to read from
* @ops: oob operation description structure
*
* OneNAND read out-of-band data from the spare area for bbt scan
*/
int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct onenand_chip *this = mtd->priv;
int read = 0, thislen, column;
int ret = 0, readcmd;
size_t len = ops->ooblen;
u_char *buf = ops->oobbuf;
pr_debug("%s: from = 0x%08x, len = %zi\n", __func__, (unsigned int)from,
len);
/* Initialize return value */
ops->oobretlen = 0;
/* Do not allow reads past end of device */
if (unlikely((from + len) > mtd->size)) {
printk(KERN_ERR "%s: Attempt read beyond end of device\n",
__func__);
return ONENAND_BBT_READ_FATAL_ERROR;
}
/* Grab the lock and see if the device is available */
onenand_get_device(mtd, FL_READING);
column = from & (mtd->oobsize - 1);
readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
while (read < len) {
cond_resched();
thislen = mtd->oobsize - column;
thislen = min_t(int, thislen, len);
this->command(mtd, readcmd, from, mtd->oobsize);
onenand_update_bufferram(mtd, from, 0);
ret = this->bbt_wait(mtd, FL_READING);
if (unlikely(ret))
ret = onenand_recover_lsb(mtd, from, ret);
if (ret)
break;
this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
read += thislen;
if (read == len)
break;
buf += thislen;
/* Read more? */
if (read < len) {
/* Update Page size */
from += this->writesize;
column = 0;
}
}
/* Deselect and wake up anyone waiting on the device */
onenand_release_device(mtd);
ops->oobretlen = read;
return ret;
}
#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
/**
* onenand_verify_oob - [GENERIC] verify the oob contents after a write
* @mtd: MTD device structure
* @buf: the databuffer to verify
* @to: offset to read from
*/
static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to)
{
struct onenand_chip *this = mtd->priv;
u_char *oob_buf = this->oob_buf;
int status, i, readcmd;
readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
this->command(mtd, readcmd, to, mtd->oobsize);
onenand_update_bufferram(mtd, to, 0);
status = this->wait(mtd, FL_READING);
if (status)
return status;
this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
for (i = 0; i < mtd->oobsize; i++)
if (buf[i] != 0xFF && buf[i] != oob_buf[i])
return -EBADMSG;
return 0;
}
/**
* onenand_verify - [GENERIC] verify the chip contents after a write
* @mtd: MTD device structure
* @buf: the databuffer to verify
* @addr: offset to read from
* @len: number of bytes to read and compare
*/
static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len)
{
struct onenand_chip *this = mtd->priv;
int ret = 0;
int thislen, column;
column = addr & (this->writesize - 1);
while (len != 0) {
thislen = min_t(int, this->writesize - column, len);
this->command(mtd, ONENAND_CMD_READ, addr, this->writesize);
onenand_update_bufferram(mtd, addr, 0);
ret = this->wait(mtd, FL_READING);
if (ret)
return ret;
onenand_update_bufferram(mtd, addr, 1);
this->read_bufferram(mtd, ONENAND_DATARAM, this->verify_buf, 0, mtd->writesize);
if (memcmp(buf, this->verify_buf + column, thislen))
return -EBADMSG;
len -= thislen;
buf += thislen;
addr += thislen;
column = 0;
}
return 0;
}
#else
#define onenand_verify(...) (0)
#define onenand_verify_oob(...) (0)
#endif
#define NOTALIGNED(x)	(((x) & (this->subpagesize - 1)) != 0)
static void onenand_panic_wait(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
unsigned int interrupt;
int i;
for (i = 0; i < 2000; i++) {
interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
if (interrupt & ONENAND_INT_MASTER)
break;
udelay(10);
}
}
/**
* onenand_panic_write - [MTD Interface] write buffer to FLASH in a panic context
* @mtd: MTD device structure
* @to: offset to write to
* @len: number of bytes to write
* @retlen: pointer to variable to store the number of written bytes
* @buf: the data to write
*
* Write with ECC
*/
static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct onenand_chip *this = mtd->priv;
int column, subpage;
int written = 0;
if (this->state == FL_PM_SUSPENDED)
return -EBUSY;
/* Wait for any existing operation to clear */
onenand_panic_wait(mtd);
pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
(int)len);
	/* Reject writes which are not page aligned */
if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
__func__);
return -EINVAL;
}
column = to & (mtd->writesize - 1);
	/* Loop until all data is written */
while (written < len) {
int thislen = min_t(int, mtd->writesize - column, len - written);
u_char *wbuf = (u_char *) buf;
this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen);
/* Partial page write */
subpage = thislen < mtd->writesize;
if (subpage) {
memset(this->page_buf, 0xff, mtd->writesize);
memcpy(this->page_buf + column, buf, thislen);
wbuf = this->page_buf;
}
this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize);
this->write_bufferram(mtd, ONENAND_SPARERAM, ffchars, 0, mtd->oobsize);
this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
onenand_panic_wait(mtd);
/* In partial page write we don't update bufferram */
onenand_update_bufferram(mtd, to, !subpage);
if (ONENAND_IS_2PLANE(this)) {
ONENAND_SET_BUFFERRAM1(this);
onenand_update_bufferram(mtd, to + this->writesize, !subpage);
}
written += thislen;
if (written == len)
break;
column = 0;
to += thislen;
buf += thislen;
}
*retlen = written;
return 0;
}
/**
* onenand_fill_auto_oob - [INTERN] oob auto-placement transfer
* @mtd: MTD device structure
* @oob_buf: oob buffer
* @buf: source address
* @column: oob offset to write to
* @thislen: oob length to write
*/
static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf,
const u_char *buf, int column, int thislen)
{
return mtd_ooblayout_set_databytes(mtd, buf, oob_buf, column, thislen);
}
/**
* onenand_write_ops_nolock - [OneNAND Interface] write main and/or out-of-band
* @mtd: MTD device structure
* @to: offset to write to
* @ops: oob operation description structure
*
* Write main and/or oob with ECC
*/
static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
struct onenand_chip *this = mtd->priv;
int written = 0, column, thislen = 0, subpage = 0;
int prev = 0, prevlen = 0, prev_subpage = 0, first = 1;
int oobwritten = 0, oobcolumn, thisooblen, oobsize;
size_t len = ops->len;
size_t ooblen = ops->ooblen;
const u_char *buf = ops->datbuf;
const u_char *oob = ops->oobbuf;
u_char *oobbuf;
int ret = 0, cmd;
pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
(int)len);
/* Initialize retlen, in case of early exit */
ops->retlen = 0;
ops->oobretlen = 0;
	/* Reject writes which are not page aligned */
if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
__func__);
return -EINVAL;
}
/* Check zero length */
if (!len)
return 0;
oobsize = mtd_oobavail(mtd, ops);
oobcolumn = to & (mtd->oobsize - 1);
column = to & (mtd->writesize - 1);
	/* Loop until all data is written */
while (1) {
if (written < len) {
u_char *wbuf = (u_char *) buf;
thislen = min_t(int, mtd->writesize - column, len - written);
thisooblen = min_t(int, oobsize - oobcolumn, ooblen - oobwritten);
cond_resched();
this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen);
/* Partial page write */
subpage = thislen < mtd->writesize;
if (subpage) {
memset(this->page_buf, 0xff, mtd->writesize);
memcpy(this->page_buf + column, buf, thislen);
wbuf = this->page_buf;
}
this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize);
if (oob) {
oobbuf = this->oob_buf;
				/* We send data to the spare RAM with oobsize
				 * to prevent byte access */
memset(oobbuf, 0xff, mtd->oobsize);
if (ops->mode == MTD_OPS_AUTO_OOB)
onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen);
else
memcpy(oobbuf + oobcolumn, oob, thisooblen);
oobwritten += thisooblen;
oob += thisooblen;
oobcolumn = 0;
} else
oobbuf = (u_char *) ffchars;
this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
} else
ONENAND_SET_NEXT_BUFFERRAM(this);
/*
* 2 PLANE, MLC, and Flex-OneNAND do not support
* write-while-program feature.
*/
if (!ONENAND_IS_2PLANE(this) && !ONENAND_IS_4KB_PAGE(this) && !first) {
ONENAND_SET_PREV_BUFFERRAM(this);
ret = this->wait(mtd, FL_WRITING);
/* In partial page write we don't update bufferram */
onenand_update_bufferram(mtd, prev, !ret && !prev_subpage);
if (ret) {
written -= prevlen;
printk(KERN_ERR "%s: write failed %d\n",
__func__, ret);
break;
}
if (written == len) {
				/* Only checked when verify write is turned on */
ret = onenand_verify(mtd, buf - len, to - len, len);
if (ret)
printk(KERN_ERR "%s: verify failed %d\n",
__func__, ret);
break;
}
ONENAND_SET_NEXT_BUFFERRAM(this);
}
this->ongoing = 0;
cmd = ONENAND_CMD_PROG;
/* Exclude 1st OTP and OTP blocks for cache program feature */
if (ONENAND_IS_CACHE_PROGRAM(this) &&
likely(onenand_block(this, to) != 0) &&
ONENAND_IS_4KB_PAGE(this) &&
((written + thislen) < len)) {
cmd = ONENAND_CMD_2X_CACHE_PROG;
this->ongoing = 1;
}
this->command(mtd, cmd, to, mtd->writesize);
/*
* 2 PLANE, MLC, and Flex-OneNAND wait here
*/
if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this)) {
ret = this->wait(mtd, FL_WRITING);
/* In partial page write we don't update bufferram */
onenand_update_bufferram(mtd, to, !ret && !subpage);
if (ret) {
printk(KERN_ERR "%s: write failed %d\n",
__func__, ret);
break;
}
			/* Only checked when verify write is turned on */
ret = onenand_verify(mtd, buf, to, thislen);
if (ret) {
printk(KERN_ERR "%s: verify failed %d\n",
__func__, ret);
break;
}
written += thislen;
if (written == len)
break;
} else
written += thislen;
column = 0;
prev_subpage = subpage;
prev = to;
prevlen = thislen;
to += thislen;
buf += thislen;
first = 0;
}
/* In error case, clear all bufferrams */
if (written != len)
onenand_invalidate_bufferram(mtd, 0, -1);
ops->retlen = written;
ops->oobretlen = oobwritten;
return ret;
}
/**
* onenand_write_oob_nolock - [INTERN] OneNAND write out-of-band
* @mtd: MTD device structure
* @to: offset to write to
* @ops: oob operation description structure
*
* OneNAND write out-of-band
*/
static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
struct onenand_chip *this = mtd->priv;
int column, ret = 0, oobsize;
int written = 0, oobcmd;
u_char *oobbuf;
size_t len = ops->ooblen;
const u_char *buf = ops->oobbuf;
unsigned int mode = ops->mode;
to += ops->ooboffs;
pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
(int)len);
/* Initialize retlen, in case of early exit */
ops->oobretlen = 0;
if (mode == MTD_OPS_AUTO_OOB)
oobsize = mtd->oobavail;
else
oobsize = mtd->oobsize;
column = to & (mtd->oobsize - 1);
if (unlikely(column >= oobsize)) {
printk(KERN_ERR "%s: Attempted to start write outside oob\n",
__func__);
return -EINVAL;
}
/* For compatibility with NAND: Do not allow write past end of page */
if (unlikely(column + len > oobsize)) {
printk(KERN_ERR "%s: Attempt to write past end of page\n",
__func__);
return -EINVAL;
}
oobbuf = this->oob_buf;
oobcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
	/* Loop until all data is written */
while (written < len) {
int thislen = min_t(int, oobsize, len - written);
cond_resched();
this->command(mtd, ONENAND_CMD_BUFFERRAM, to, mtd->oobsize);
		/* We send data to the spare RAM with oobsize
		 * to prevent byte access */
memset(oobbuf, 0xff, mtd->oobsize);
if (mode == MTD_OPS_AUTO_OOB)
onenand_fill_auto_oob(mtd, oobbuf, buf, column, thislen);
else
memcpy(oobbuf + column, buf, thislen);
this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
if (ONENAND_IS_4KB_PAGE(this)) {
			/* Set main area of DataRAM to 0xff */
memset(this->page_buf, 0xff, mtd->writesize);
this->write_bufferram(mtd, ONENAND_DATARAM,
this->page_buf, 0, mtd->writesize);
}
this->command(mtd, oobcmd, to, mtd->oobsize);
onenand_update_bufferram(mtd, to, 0);
if (ONENAND_IS_2PLANE(this)) {
ONENAND_SET_BUFFERRAM1(this);
onenand_update_bufferram(mtd, to + this->writesize, 0);
}
ret = this->wait(mtd, FL_WRITING);
if (ret) {
printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
break;
}
ret = onenand_verify_oob(mtd, oobbuf, to);
if (ret) {
printk(KERN_ERR "%s: verify failed %d\n",
__func__, ret);
break;
}
written += thislen;
if (written == len)
break;
to += mtd->writesize;
buf += thislen;
column = 0;
}
ops->oobretlen = written;
return ret;
}
/**
* onenand_write_oob - [MTD Interface] NAND write data and/or out-of-band
* @mtd: MTD device structure
* @to: offset to write
* @ops: oob operation description structure
*/
static int onenand_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
int ret;
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
case MTD_OPS_AUTO_OOB:
break;
case MTD_OPS_RAW:
/* Not implemented yet */
default:
return -EINVAL;
}
onenand_get_device(mtd, FL_WRITING);
if (ops->datbuf)
ret = onenand_write_ops_nolock(mtd, to, ops);
else
ret = onenand_write_oob_nolock(mtd, to, ops);
onenand_release_device(mtd);
return ret;
}
/**
* onenand_block_isbad_nolock - [GENERIC] Check if a block is marked bad
* @mtd: MTD device structure
* @ofs: offset from device start
 * @allowbbt: 1 if it is allowed to access the bbt area
*
 * Check if the block is bad, either by reading the bad block table or
 * by calling the scan function.
*/
static int onenand_block_isbad_nolock(struct mtd_info *mtd, loff_t ofs, int allowbbt)
{
struct onenand_chip *this = mtd->priv;
struct bbm_info *bbm = this->bbm;
/* Return info from the table */
return bbm->isbad_bbt(mtd, ofs, allowbbt);
}
static int onenand_multiblock_erase_verify(struct mtd_info *mtd,
struct erase_info *instr)
{
struct onenand_chip *this = mtd->priv;
loff_t addr = instr->addr;
int len = instr->len;
unsigned int block_size = (1 << this->erase_shift);
int ret = 0;
while (len) {
this->command(mtd, ONENAND_CMD_ERASE_VERIFY, addr, block_size);
ret = this->wait(mtd, FL_VERIFYING_ERASE);
if (ret) {
printk(KERN_ERR "%s: Failed verify, block %d\n",
__func__, onenand_block(this, addr));
instr->fail_addr = addr;
return -1;
}
len -= block_size;
addr += block_size;
}
return 0;
}
/**
* onenand_multiblock_erase - [INTERN] erase block(s) using multiblock erase
* @mtd: MTD device structure
* @instr: erase instruction
* @block_size: block size
*
 * Erase one or more blocks, up to 64 blocks at a time
*/
static int onenand_multiblock_erase(struct mtd_info *mtd,
struct erase_info *instr,
unsigned int block_size)
{
struct onenand_chip *this = mtd->priv;
loff_t addr = instr->addr;
int len = instr->len;
int eb_count = 0;
int ret = 0;
int bdry_block = 0;
if (ONENAND_IS_DDP(this)) {
loff_t bdry_addr = this->chipsize >> 1;
if (addr < bdry_addr && (addr + len) > bdry_addr)
bdry_block = bdry_addr >> this->erase_shift;
}
/* Pre-check bbs */
while (len) {
/* Check if we have a bad block, we do not erase bad blocks */
if (onenand_block_isbad_nolock(mtd, addr, 0)) {
printk(KERN_WARNING "%s: attempt to erase a bad block "
"at addr 0x%012llx\n",
__func__, (unsigned long long) addr);
return -EIO;
}
len -= block_size;
addr += block_size;
}
len = instr->len;
addr = instr->addr;
/* loop over 64 eb batches */
while (len) {
struct erase_info verify_instr = *instr;
int max_eb_count = MB_ERASE_MAX_BLK_COUNT;
verify_instr.addr = addr;
verify_instr.len = 0;
/* do not cross chip boundary */
if (bdry_block) {
int this_block = (addr >> this->erase_shift);
if (this_block < bdry_block) {
max_eb_count = min(max_eb_count,
(bdry_block - this_block));
}
}
eb_count = 0;
while (len > block_size && eb_count < (max_eb_count - 1)) {
this->command(mtd, ONENAND_CMD_MULTIBLOCK_ERASE,
addr, block_size);
onenand_invalidate_bufferram(mtd, addr, block_size);
ret = this->wait(mtd, FL_PREPARING_ERASE);
if (ret) {
printk(KERN_ERR "%s: Failed multiblock erase, "
"block %d\n", __func__,
onenand_block(this, addr));
instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
return -EIO;
}
len -= block_size;
addr += block_size;
eb_count++;
}
/* last block of 64-eb series */
cond_resched();
this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
onenand_invalidate_bufferram(mtd, addr, block_size);
ret = this->wait(mtd, FL_ERASING);
/* Check if it is write protected */
if (ret) {
printk(KERN_ERR "%s: Failed erase, block %d\n",
__func__, onenand_block(this, addr));
instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
return -EIO;
}
len -= block_size;
addr += block_size;
eb_count++;
/* verify */
verify_instr.len = eb_count * block_size;
if (onenand_multiblock_erase_verify(mtd, &verify_instr)) {
instr->fail_addr = verify_instr.fail_addr;
return -EIO;
}
}
return 0;
}
/**
* onenand_block_by_block_erase - [INTERN] erase block(s) using regular erase
* @mtd: MTD device structure
* @instr: erase instruction
* @region: erase region
* @block_size: erase block size
*
* Erase one or more blocks one block at a time
*/
static int onenand_block_by_block_erase(struct mtd_info *mtd,
struct erase_info *instr,
struct mtd_erase_region_info *region,
unsigned int block_size)
{
struct onenand_chip *this = mtd->priv;
loff_t addr = instr->addr;
int len = instr->len;
loff_t region_end = 0;
int ret = 0;
if (region) {
/* region is set for Flex-OneNAND */
region_end = region->offset + region->erasesize * region->numblocks;
}
/* Loop through the blocks */
while (len) {
cond_resched();
/* Check if we have a bad block, we do not erase bad blocks */
if (onenand_block_isbad_nolock(mtd, addr, 0)) {
printk(KERN_WARNING "%s: attempt to erase a bad block "
"at addr 0x%012llx\n",
__func__, (unsigned long long) addr);
return -EIO;
}
this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
onenand_invalidate_bufferram(mtd, addr, block_size);
ret = this->wait(mtd, FL_ERASING);
		/* Check if it is write protected */
if (ret) {
printk(KERN_ERR "%s: Failed erase, block %d\n",
__func__, onenand_block(this, addr));
instr->fail_addr = addr;
return -EIO;
}
len -= block_size;
addr += block_size;
if (region && addr == region_end) {
if (!len)
break;
region++;
block_size = region->erasesize;
region_end = region->offset + region->erasesize * region->numblocks;
if (len & (block_size - 1)) {
/* FIXME: This should be handled at MTD partitioning level. */
printk(KERN_ERR "%s: Unaligned address\n",
__func__);
return -EIO;
}
}
}
return 0;
}
/**
* onenand_erase - [MTD Interface] erase block(s)
* @mtd: MTD device structure
* @instr: erase instruction
*
* Erase one or more blocks
*/
static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct onenand_chip *this = mtd->priv;
unsigned int block_size;
loff_t addr = instr->addr;
loff_t len = instr->len;
int ret = 0;
struct mtd_erase_region_info *region = NULL;
loff_t region_offset = 0;
pr_debug("%s: start=0x%012llx, len=%llu\n", __func__,
(unsigned long long)instr->addr,
(unsigned long long)instr->len);
if (FLEXONENAND(this)) {
/* Find the eraseregion of this address */
int i = flexonenand_region(mtd, addr);
region = &mtd->eraseregions[i];
block_size = region->erasesize;
/* Start address within region must align on block boundary.
* Erase region's start offset is always block start address.
*/
region_offset = region->offset;
} else
block_size = 1 << this->erase_shift;
/* Start address must align on block boundary */
if (unlikely((addr - region_offset) & (block_size - 1))) {
printk(KERN_ERR "%s: Unaligned address\n", __func__);
return -EINVAL;
}
/* Length must align on block boundary */
if (unlikely(len & (block_size - 1))) {
printk(KERN_ERR "%s: Length not block aligned\n", __func__);
return -EINVAL;
}
/* Grab the lock and see if the device is available */
onenand_get_device(mtd, FL_ERASING);
if (ONENAND_IS_4KB_PAGE(this) || region ||
instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
/* region is set for Flex-OneNAND (no mb erase) */
ret = onenand_block_by_block_erase(mtd, instr,
region, block_size);
} else {
ret = onenand_multiblock_erase(mtd, instr, block_size);
}
/* Deselect and wake up anyone waiting on the device */
onenand_release_device(mtd);
return ret;
}
/**
* onenand_sync - [MTD Interface] sync
* @mtd: MTD device structure
*
 * Sync is actually a wait-for-chip-ready function
*/
static void onenand_sync(struct mtd_info *mtd)
{
pr_debug("%s: called\n", __func__);
/* Grab the lock and see if the device is available */
onenand_get_device(mtd, FL_SYNCING);
/* Release it and go back */
onenand_release_device(mtd);
}
/**
* onenand_block_isbad - [MTD Interface] Check whether the block at the given offset is bad
* @mtd: MTD device structure
* @ofs: offset relative to mtd start
*
* Check whether the block is bad
*/
static int onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
int ret;
onenand_get_device(mtd, FL_READING);
ret = onenand_block_isbad_nolock(mtd, ofs, 0);
onenand_release_device(mtd);
return ret;
}
/**
* onenand_default_block_markbad - [DEFAULT] mark a block bad
* @mtd: MTD device structure
* @ofs: offset from device start
*
* This is the default implementation, which can be overridden by
* a hardware specific driver.
*/
static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct onenand_chip *this = mtd->priv;
struct bbm_info *bbm = this->bbm;
u_char buf[2] = {0, 0};
struct mtd_oob_ops ops = {
.mode = MTD_OPS_PLACE_OOB,
.ooblen = 2,
.oobbuf = buf,
.ooboffs = 0,
};
int block;
/* Get block number */
block = onenand_block(this, ofs);
if (bbm->bbt)
bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
/* We write two bytes, so we don't have to mess with 16-bit access */
ofs += mtd->oobsize + (this->badblockpos & ~0x01);
/* FIXME : What to do when marking SLC block in partition
* with MLC erasesize? For now, it is not advisable to
* create partitions containing both SLC and MLC regions.
*/
return onenand_write_oob_nolock(mtd, ofs, &ops);
}
/**
* onenand_block_markbad - [MTD Interface] Mark the block at the given offset as bad
* @mtd: MTD device structure
* @ofs: offset relative to mtd start
*
* Mark the block as bad
*/
static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct onenand_chip *this = mtd->priv;
int ret;
ret = onenand_block_isbad(mtd, ofs);
if (ret) {
/* If it was bad already, return success and do nothing */
if (ret > 0)
return 0;
return ret;
}
onenand_get_device(mtd, FL_WRITING);
ret = this->block_markbad(mtd, ofs);
onenand_release_device(mtd);
return ret;
}
/**
* onenand_do_lock_cmd - [OneNAND Interface] Lock or unlock block(s)
* @mtd: MTD device structure
* @ofs: offset relative to mtd start
* @len: number of bytes to lock or unlock
* @cmd: lock or unlock command
*
* Lock or unlock one or more blocks
*/
static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int cmd)
{
struct onenand_chip *this = mtd->priv;
int start, end, block, value, status;
int wp_status_mask;
start = onenand_block(this, ofs);
end = onenand_block(this, ofs + len) - 1;
if (cmd == ONENAND_CMD_LOCK)
wp_status_mask = ONENAND_WP_LS;
else
wp_status_mask = ONENAND_WP_US;
/* Continuous lock scheme */
if (this->options & ONENAND_HAS_CONT_LOCK) {
/* Set start block address */
this->write_word(start, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
/* Set end block address */
this->write_word(end, this->base + ONENAND_REG_END_BLOCK_ADDRESS);
/* Write lock command */
this->command(mtd, cmd, 0, 0);
/* There's no return value */
this->wait(mtd, FL_LOCKING);
/* Sanity check */
while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
& ONENAND_CTRL_ONGO)
continue;
/* Check lock status */
status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
if (!(status & wp_status_mask))
printk(KERN_ERR "%s: wp status = 0x%x\n",
__func__, status);
return 0;
}
/* Block lock scheme */
for (block = start; block < end + 1; block++) {
/* Set block address */
value = onenand_block_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
/* Select DataRAM for DDP */
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
/* Set start block address */
this->write_word(block, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
/* Write lock command */
this->command(mtd, cmd, 0, 0);
/* There's no return value */
this->wait(mtd, FL_LOCKING);
/* Sanity check */
while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
& ONENAND_CTRL_ONGO)
continue;
/* Check lock status */
status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
if (!(status & wp_status_mask))
printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
__func__, block, status);
}
return 0;
}
/**
* onenand_lock - [MTD Interface] Lock block(s)
* @mtd: MTD device structure
* @ofs: offset relative to mtd start
* @len: number of bytes to lock
*
* Lock one or more blocks
*/
static int onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
int ret;
onenand_get_device(mtd, FL_LOCKING);
ret = onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_LOCK);
onenand_release_device(mtd);
return ret;
}
/**
* onenand_unlock - [MTD Interface] Unlock block(s)
* @mtd: MTD device structure
* @ofs: offset relative to mtd start
* @len: number of bytes to unlock
*
* Unlock one or more blocks
*/
static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
int ret;
onenand_get_device(mtd, FL_LOCKING);
ret = onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
onenand_release_device(mtd);
return ret;
}
/**
* onenand_check_lock_status - [OneNAND Interface] Check lock status
* @this: onenand chip data structure
*
* Check lock status
*/
static int onenand_check_lock_status(struct onenand_chip *this)
{
unsigned int value, block, status;
unsigned int end;
end = this->chipsize >> this->erase_shift;
for (block = 0; block < end; block++) {
/* Set block address */
value = onenand_block_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
/* Select DataRAM for DDP */
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
/* Set start block address */
this->write_word(block, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
/* Check lock status */
status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
if (!(status & ONENAND_WP_US)) {
printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
__func__, block, status);
return 0;
}
}
return 1;
}
/**
* onenand_unlock_all - [OneNAND Interface] unlock all blocks
* @mtd: MTD device structure
*
* Unlock all blocks
*/
static void onenand_unlock_all(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
loff_t ofs = 0;
loff_t len = mtd->size;
if (this->options & ONENAND_HAS_UNLOCK_ALL) {
/* Set start block address */
this->write_word(0, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
/* Write unlock command */
this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);
/* There's no return value */
this->wait(mtd, FL_LOCKING);
/* Sanity check */
while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
& ONENAND_CTRL_ONGO)
continue;
/* Don't check lock status */
if (this->options & ONENAND_SKIP_UNLOCK_CHECK)
return;
/* Check lock status */
if (onenand_check_lock_status(this))
return;
/* Workaround for all block unlock in DDP */
if (ONENAND_IS_DDP(this) && !FLEXONENAND(this)) {
/* All blocks on another chip */
ofs = this->chipsize >> 1;
len = this->chipsize >> 1;
}
}
onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
}
#ifdef CONFIG_MTD_ONENAND_OTP
/**
* onenand_otp_command - Send OTP specific command to OneNAND device
* @mtd: MTD device structure
* @cmd: the command to be sent
* @addr: offset to read from or write to
* @len: number of bytes to read or write
*/
static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr,
size_t len)
{
struct onenand_chip *this = mtd->priv;
int value, block, page;
/* Address translation */
switch (cmd) {
case ONENAND_CMD_OTP_ACCESS:
block = (int) (addr >> this->erase_shift);
page = -1;
break;
default:
block = (int) (addr >> this->erase_shift);
page = (int) (addr >> this->page_shift);
if (ONENAND_IS_2PLANE(this)) {
/* Make the even block number */
block &= ~1;
/* Is it the odd plane? */
if (addr & this->writesize)
block++;
page >>= 1;
}
page &= this->page_mask;
break;
}
if (block != -1) {
/* Write 'DFS, FBA' of Flash */
value = onenand_block_address(this, block);
this->write_word(value, this->base +
ONENAND_REG_START_ADDRESS1);
}
if (page != -1) {
/* Now we use page size operation */
int sectors = 4, count = 4;
int dataram;
switch (cmd) {
default:
if (ONENAND_IS_2PLANE(this) && cmd == ONENAND_CMD_PROG)
cmd = ONENAND_CMD_2X_PROG;
dataram = ONENAND_CURRENT_BUFFERRAM(this);
break;
}
/* Write 'FPA, FSA' of Flash */
value = onenand_page_address(page, sectors);
this->write_word(value, this->base +
ONENAND_REG_START_ADDRESS8);
/* Write 'BSA, BSC' of DataRAM */
value = onenand_buffer_address(dataram, sectors, count);
this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
}
/* Interrupt clear */
this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT);
/* Write command */
this->write_word(cmd, this->base + ONENAND_REG_COMMAND);
return 0;
}
/**
* onenand_otp_write_oob_nolock - [INTERN] OneNAND write out-of-band, specific to OTP
* @mtd: MTD device structure
* @to: offset to write to
* @ops: oob operation description structure
*
* OneNAND write out-of-band only for OTP
*/
static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
struct onenand_chip *this = mtd->priv;
int column, ret = 0, oobsize;
int written = 0;
u_char *oobbuf;
size_t len = ops->ooblen;
const u_char *buf = ops->oobbuf;
int block, value, status;
to += ops->ooboffs;
/* Initialize retlen, in case of early exit */
ops->oobretlen = 0;
oobsize = mtd->oobsize;
column = to & (mtd->oobsize - 1);
oobbuf = this->oob_buf;
/* Loop until all data is written */
while (written < len) {
int thislen = min_t(int, oobsize, len - written);
cond_resched();
block = (int) (to >> this->erase_shift);
/*
* Write 'DFS, FBA' of Flash
* Add: F100h DQ=DFS, FBA
*/
value = onenand_block_address(this, block);
this->write_word(value, this->base +
ONENAND_REG_START_ADDRESS1);
/*
* Select DataRAM for DDP
* Add: F101h DQ=DBS
*/
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base +
ONENAND_REG_START_ADDRESS2);
ONENAND_SET_NEXT_BUFFERRAM(this);
/*
* Enter OTP access mode
*/
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
/* We send data to the spare RAM with full oobsize
* to prevent byte access */
memcpy(oobbuf + column, buf, thislen);
/*
* Write Data into DataRAM
* Add: 8th Word
* in sector0/spare/page0
* DQ=XXFCh
*/
this->write_bufferram(mtd, ONENAND_SPARERAM,
oobbuf, 0, mtd->oobsize);
onenand_otp_command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize);
onenand_update_bufferram(mtd, to, 0);
if (ONENAND_IS_2PLANE(this)) {
ONENAND_SET_BUFFERRAM1(this);
onenand_update_bufferram(mtd, to + this->writesize, 0);
}
ret = this->wait(mtd, FL_WRITING);
if (ret) {
printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
break;
}
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
this->wait(mtd, FL_RESETTING);
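/*
* Controller status bit 5 reports the lock state of the 1st block
* and bit 6 that of the OTP block, as decoded below.
*/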
status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
status &= 0x60;
if (status == 0x60) {
printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
printk(KERN_DEBUG "1st Block\tLOCKED\n");
printk(KERN_DEBUG "OTP Block\tLOCKED\n");
} else if (status == 0x20) {
printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
printk(KERN_DEBUG "1st Block\tLOCKED\n");
printk(KERN_DEBUG "OTP Block\tUN-LOCKED\n");
} else if (status == 0x40) {
printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
printk(KERN_DEBUG "1st Block\tUN-LOCKED\n");
printk(KERN_DEBUG "OTP Block\tLOCKED\n");
} else {
printk(KERN_DEBUG "Reboot to check\n");
}
written += thislen;
if (written == len)
break;
to += mtd->writesize;
buf += thislen;
column = 0;
}
ops->oobretlen = written;
return ret;
}
/* Internal OTP operation */
typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf);
/**
* do_otp_read - [DEFAULT] Read OTP block area
* @mtd: MTD device structure
* @from: The offset to read
* @len: number of bytes to read
* @retlen: pointer to variable to store the number of read bytes
* @buf: the databuffer to put/get data
*
* Read OTP block area.
*/
static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct onenand_chip *this = mtd->priv;
struct mtd_oob_ops ops = {
.len = len,
.ooblen = 0,
.datbuf = buf,
.oobbuf = NULL,
};
int ret;
/* Enter OTP access mode */
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
ret = ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, &ops) :
onenand_read_ops_nolock(mtd, from, &ops);
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
this->wait(mtd, FL_RESETTING);
return ret;
}
/**
* do_otp_write - [DEFAULT] Write OTP block area
* @mtd: MTD device structure
* @to: The offset to write
* @len: number of bytes to write
* @retlen: pointer to variable to store the number of written bytes
* @buf: the databuffer to put/get data
*
* Write OTP block area.
*/
static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, u_char *buf)
{
struct onenand_chip *this = mtd->priv;
unsigned char *pbuf = buf;
int ret;
struct mtd_oob_ops ops = { };
/* Force buffer page aligned */
if (len < mtd->writesize) {
memcpy(this->page_buf, buf, len);
memset(this->page_buf + len, 0xff, mtd->writesize - len);
pbuf = this->page_buf;
len = mtd->writesize;
}
/* Enter OTP access mode */
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
ops.len = len;
ops.ooblen = 0;
ops.datbuf = pbuf;
ops.oobbuf = NULL;
ret = onenand_write_ops_nolock(mtd, to, &ops);
*retlen = ops.retlen;
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
this->wait(mtd, FL_RESETTING);
return ret;
}
/**
* do_otp_lock - [DEFAULT] Lock OTP block area
* @mtd: MTD device structure
* @from: The offset to lock
* @len: number of bytes to lock
* @retlen: pointer to variable to store the number of locked bytes
* @buf: the databuffer to put/get data
*
* Lock OTP block area.
*/
static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct onenand_chip *this = mtd->priv;
struct mtd_oob_ops ops = { };
int ret;
if (FLEXONENAND(this)) {
/* Enter OTP access mode */
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
/*
* For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
* main area of page 49.
*/
ops.len = mtd->writesize;
ops.ooblen = 0;
ops.datbuf = buf;
ops.oobbuf = NULL;
ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops);
*retlen = ops.retlen;
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
this->wait(mtd, FL_RESETTING);
} else {
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooblen = len;
ops.oobbuf = buf;
ops.ooboffs = 0;
ret = onenand_otp_write_oob_nolock(mtd, from, &ops);
*retlen = ops.oobretlen;
}
return ret;
}
/**
* onenand_otp_walk - [DEFAULT] Handle OTP operation
* @mtd: MTD device structure
* @from: The offset to read/write
* @len: number of bytes to read/write
* @retlen: pointer to variable to store the number of read bytes
* @buf: the databuffer to put/get data
* @action: do given action
* @mode: specify user and factory
*
* Handle OTP operation.
*/
static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf,
otp_op_t action, int mode)
{
struct onenand_chip *this = mtd->priv;
int otp_pages;
int density;
int ret = 0;
*retlen = 0;
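/* Devices below 512Mb density provide 20 OTP pages, larger ones 50 */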
density = onenand_get_density(this->device_id);
if (density < ONENAND_DEVICE_DENSITY_512Mb)
otp_pages = 20;
else
otp_pages = 50;
if (mode == MTD_OTP_FACTORY) {
from += mtd->writesize * otp_pages;
otp_pages = ONENAND_PAGES_PER_BLOCK - otp_pages;
}
/* Check User/Factory boundary */
if (mode == MTD_OTP_USER) {
if (mtd->writesize * otp_pages < from + len)
return 0;
} else {
if (mtd->writesize * otp_pages < len)
return 0;
}
onenand_get_device(mtd, FL_OTPING);
while (len > 0 && otp_pages > 0) {
if (!action) { /* OTP Info functions */
struct otp_info *otpinfo;
len -= sizeof(struct otp_info);
if (len <= 0) {
ret = -ENOSPC;
break;
}
otpinfo = (struct otp_info *) buf;
otpinfo->start = from;
otpinfo->length = mtd->writesize;
otpinfo->locked = 0;
from += mtd->writesize;
buf += sizeof(struct otp_info);
*retlen += sizeof(struct otp_info);
} else {
size_t tmp_retlen;
ret = action(mtd, from, len, &tmp_retlen, buf);
if (ret)
break;
buf += tmp_retlen;
len -= tmp_retlen;
*retlen += tmp_retlen;
}
otp_pages--;
}
onenand_release_device(mtd);
return ret;
}
/**
* onenand_get_fact_prot_info - [MTD Interface] Read factory OTP info
* @mtd: MTD device structure
* @len: number of bytes to read
* @retlen: pointer to variable to store the number of read bytes
* @buf: the databuffer to put/get data
*
* Read factory OTP info.
*/
static int onenand_get_fact_prot_info(struct mtd_info *mtd, size_t len,
size_t *retlen, struct otp_info *buf)
{
return onenand_otp_walk(mtd, 0, len, retlen, (u_char *) buf, NULL,
MTD_OTP_FACTORY);
}
/**
* onenand_read_fact_prot_reg - [MTD Interface] Read factory OTP area
* @mtd: MTD device structure
* @from: The offset to read
* @len: number of bytes to read
* @retlen: pointer to variable to store the number of read bytes
* @buf: the databuffer to put/get data
*
* Read factory OTP area.
*/
static int onenand_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
return onenand_otp_walk(mtd, from, len, retlen, buf, do_otp_read, MTD_OTP_FACTORY);
}
/**
* onenand_get_user_prot_info - [MTD Interface] Read user OTP info
* @mtd: MTD device structure
* @retlen: pointer to variable to store the number of read bytes
* @len: number of bytes to read
* @buf: the databuffer to put/get data
*
* Read user OTP info.
*/
static int onenand_get_user_prot_info(struct mtd_info *mtd, size_t len,
size_t *retlen, struct otp_info *buf)
{
return onenand_otp_walk(mtd, 0, len, retlen, (u_char *) buf, NULL,
MTD_OTP_USER);
}
/**
* onenand_read_user_prot_reg - [MTD Interface] Read user OTP area
* @mtd: MTD device structure
* @from: The offset to read
* @len: number of bytes to read
* @retlen: pointer to variable to store the number of read bytes
* @buf: the databuffer to put/get data
*
* Read user OTP area.
*/
static int onenand_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
return onenand_otp_walk(mtd, from, len, retlen, buf, do_otp_read, MTD_OTP_USER);
}
/**
* onenand_write_user_prot_reg - [MTD Interface] Write user OTP area
* @mtd: MTD device structure
* @from: The offset to write
* @len: number of bytes to write
* @retlen: pointer to variable to store the number of written bytes
* @buf: the databuffer to put/get data
*
* Write user OTP area.
*/
static int onenand_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, const u_char *buf)
{
return onenand_otp_walk(mtd, from, len, retlen, (u_char *)buf,
do_otp_write, MTD_OTP_USER);
}
/**
* onenand_lock_user_prot_reg - [MTD Interface] Lock user OTP area
* @mtd: MTD device structure
* @from: The offset to lock
* @len: number of bytes to lock
*
* Write lock mark on spare area in page 0 in OTP block
*/
static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len)
{
struct onenand_chip *this = mtd->priv;
u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf;
size_t retlen;
int ret;
unsigned int otp_lock_offset = ONENAND_OTP_LOCK_OFFSET;
memset(buf, 0xff, FLEXONENAND(this) ? this->writesize
: mtd->oobsize);
/*
* Write the lock mark to the 8th word of sector 0 in the spare area
* of page 0. We write 16 bytes of spare area instead of 2 bytes.
* For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
* main area of page 49.
*/
from = 0;
len = FLEXONENAND(this) ? mtd->writesize : 16;
/*
* Note: OTP lock operation
* OTP block : 0xXXFC XX 1111 1100
* 1st block : 0xXXF3 (If chip support) XX 1111 0011
* Both : 0xXXF0 (If chip support) XX 1111 0000
*/
if (FLEXONENAND(this))
otp_lock_offset = FLEXONENAND_OTP_LOCK_OFFSET;
/* ONENAND_OTP_AREA | ONENAND_OTP_BLOCK0 | ONENAND_OTP_AREA_BLOCK0 */
if (otp == 1)
buf[otp_lock_offset] = 0xFC;
else if (otp == 2)
buf[otp_lock_offset] = 0xF3;
else if (otp == 3)
buf[otp_lock_offset] = 0xF0;
else if (otp != 0)
printk(KERN_DEBUG "[OneNAND] Invalid option selected for OTP\n");
ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER);
return ret ? : retlen;
}
#endif /* CONFIG_MTD_ONENAND_OTP */
/**
* onenand_check_features - Check and set OneNAND features
* @mtd: MTD data structure
*
* Check and set OneNAND features
* - lock scheme
* - two plane
*/
static void onenand_check_features(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
unsigned int density, process, numbufs;
/* Lock scheme depends on density and process */
density = onenand_get_density(this->device_id);
process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT;
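/* The DataRAM buffer count is held in the upper byte of the register */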
numbufs = this->read_word(this->base + ONENAND_REG_NUM_BUFFERS) >> 8;
/* Lock scheme */
switch (density) {
case ONENAND_DEVICE_DENSITY_8Gb:
this->options |= ONENAND_HAS_NOP_1;
fallthrough;
case ONENAND_DEVICE_DENSITY_4Gb:
if (ONENAND_IS_DDP(this))
this->options |= ONENAND_HAS_2PLANE;
else if (numbufs == 1) {
this->options |= ONENAND_HAS_4KB_PAGE;
this->options |= ONENAND_HAS_CACHE_PROGRAM;
/*
* There are two different 4KiB pagesize chips
* and no way to tell them apart from the H/W
* config values.
*
* As a workaround, check the version ID to
* detect the correct NOP for each chip:
* KFM4G16Q4M has NOP 4 with version ID 0x0131
* KFM4G16Q5M has NOP 1 with version ID 0x013e
*/
if ((this->version_id & 0xf) == 0xe)
this->options |= ONENAND_HAS_NOP_1;
}
this->options |= ONENAND_HAS_UNLOCK_ALL;
break;
case ONENAND_DEVICE_DENSITY_2Gb:
/* 2Gb DDP does not have 2 plane */
if (!ONENAND_IS_DDP(this))
this->options |= ONENAND_HAS_2PLANE;
this->options |= ONENAND_HAS_UNLOCK_ALL;
break;
case ONENAND_DEVICE_DENSITY_1Gb:
/* A-Die has all block unlock */
if (process)
this->options |= ONENAND_HAS_UNLOCK_ALL;
break;
default:
/* Some OneNAND has continuous lock scheme */
if (!process)
this->options |= ONENAND_HAS_CONT_LOCK;
break;
}
/* The MLC has 4KiB pagesize. */
if (ONENAND_IS_MLC(this))
this->options |= ONENAND_HAS_4KB_PAGE;
if (ONENAND_IS_4KB_PAGE(this))
this->options &= ~ONENAND_HAS_2PLANE;
if (FLEXONENAND(this)) {
this->options &= ~ONENAND_HAS_CONT_LOCK;
this->options |= ONENAND_HAS_UNLOCK_ALL;
}
if (this->options & ONENAND_HAS_CONT_LOCK)
printk(KERN_DEBUG "Lock scheme is Continuous Lock\n");
if (this->options & ONENAND_HAS_UNLOCK_ALL)
printk(KERN_DEBUG "Chip supports all block unlock\n");
if (this->options & ONENAND_HAS_2PLANE)
printk(KERN_DEBUG "Chip has 2 planes\n");
if (this->options & ONENAND_HAS_4KB_PAGE)
printk(KERN_DEBUG "Chip has 4KiB pagesize\n");
if (this->options & ONENAND_HAS_CACHE_PROGRAM)
printk(KERN_DEBUG "Chip has cache program feature\n");
}
/**
* onenand_print_device_info - Print device & version ID
* @device: device ID
* @version: version ID
*
* Print device & version ID
*/
static void onenand_print_device_info(int device, int version)
{
int vcc, demuxed, ddp, density, flexonenand;
vcc = device & ONENAND_DEVICE_VCC_MASK;
demuxed = device & ONENAND_DEVICE_IS_DEMUX;
ddp = device & ONENAND_DEVICE_IS_DDP;
density = onenand_get_density(device);
flexonenand = device & DEVICE_IS_FLEXONENAND;
printk(KERN_INFO "%s%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
demuxed ? "" : "Muxed ",
flexonenand ? "Flex-" : "",
ddp ? "(DDP)" : "",
(16 << density),
vcc ? "2.65/3.3" : "1.8",
device);
printk(KERN_INFO "OneNAND version = 0x%04x\n", version);
}
static const struct onenand_manufacturers onenand_manuf_ids[] = {
{ONENAND_MFR_SAMSUNG, "Samsung"},
{ONENAND_MFR_NUMONYX, "Numonyx"},
};
/**
* onenand_check_maf - Check manufacturer ID
* @manuf: manufacturer ID
*
* Check manufacturer ID
*/
static int onenand_check_maf(int manuf)
{
int size = ARRAY_SIZE(onenand_manuf_ids);
char *name;
int i;
for (i = 0; i < size; i++)
if (manuf == onenand_manuf_ids[i].id)
break;
if (i < size)
name = onenand_manuf_ids[i].name;
else
name = "Unknown";
printk(KERN_DEBUG "OneNAND Manufacturer: %s (0x%0x)\n", name, manuf);
return (i == size);
}
/**
* flexonenand_get_boundary - Reads the SLC boundary
* @mtd: MTD data structure
*/
static int flexonenand_get_boundary(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
unsigned die, bdry;
int syscfg, locked;
/* Disable ECC */
syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
this->write_word((syscfg | 0x0100), this->base + ONENAND_REG_SYS_CFG1);
for (die = 0; die < this->dies; die++) {
this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
this->wait(mtd, FL_SYNCING);
this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
this->wait(mtd, FL_READING);
bdry = this->read_word(this->base + ONENAND_DATARAM);
if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3)
locked = 0;
else
locked = 1;
this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
this->wait(mtd, FL_RESETTING);
printk(KERN_INFO "Die %d boundary: %d%s\n", die,
this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
}
/* Enable ECC */
this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
return 0;
}
/**
* flexonenand_get_size - Fill up fields in onenand_chip and mtd_info
* boundary[], diesize[], mtd->size, mtd->erasesize
* @mtd: MTD device structure
*/
static void flexonenand_get_size(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
int die, i, eraseshift, density;
int blksperdie, maxbdry;
loff_t ofs;
density = onenand_get_density(this->device_id);
blksperdie = ((loff_t)(16 << density) << 20) >> (this->erase_shift);
blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
maxbdry = blksperdie - 1;
eraseshift = this->erase_shift - 1;
mtd->numeraseregions = this->dies << 1;
/* This fills up the device boundary */
flexonenand_get_boundary(mtd);
die = ofs = 0;
i = -1;
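/*
* Each die contributes up to two erase regions: an SLC region
* (half the MLC erase size) up to the boundary and an MLC region
* above it. Degenerate regions are merged or dropped.
*/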
for (; die < this->dies; die++) {
if (!die || this->boundary[die-1] != maxbdry) {
i++;
mtd->eraseregions[i].offset = ofs;
mtd->eraseregions[i].erasesize = 1 << eraseshift;
mtd->eraseregions[i].numblocks =
this->boundary[die] + 1;
ofs += mtd->eraseregions[i].numblocks << eraseshift;
eraseshift++;
} else {
mtd->numeraseregions -= 1;
mtd->eraseregions[i].numblocks +=
this->boundary[die] + 1;
ofs += (this->boundary[die] + 1) << (eraseshift - 1);
}
if (this->boundary[die] != maxbdry) {
i++;
mtd->eraseregions[i].offset = ofs;
mtd->eraseregions[i].erasesize = 1 << eraseshift;
mtd->eraseregions[i].numblocks = maxbdry ^
this->boundary[die];
ofs += mtd->eraseregions[i].numblocks << eraseshift;
eraseshift--;
} else
mtd->numeraseregions -= 1;
}
/* Expose MLC erase size except when all blocks are SLC */
mtd->erasesize = 1 << this->erase_shift;
if (mtd->numeraseregions == 1)
mtd->erasesize >>= 1;
printk(KERN_INFO "Device has %d eraseregions\n", mtd->numeraseregions);
for (i = 0; i < mtd->numeraseregions; i++)
printk(KERN_INFO "[offset: 0x%08x, erasesize: 0x%05x,"
" numblocks: %04u]\n",
(unsigned int) mtd->eraseregions[i].offset,
mtd->eraseregions[i].erasesize,
mtd->eraseregions[i].numblocks);
for (die = 0, mtd->size = 0; die < this->dies; die++) {
this->diesize[die] = (loff_t)blksperdie << this->erase_shift;
this->diesize[die] -= (loff_t)(this->boundary[die] + 1)
<< (this->erase_shift - 1);
mtd->size += this->diesize[die];
}
}
/**
* flexonenand_check_blocks_erased - Check if blocks are erased
* @mtd: mtd info structure
* @start: first erase block to check
* @end: last erase block to check
*
* Converting an unerased block from MLC to SLC
* causes byte values to change. Since both data and its ECC
* have changed, reads on the block give uncorrectable errors.
* This might lead to the block being detected as bad.
*
* Avoid this by ensuring that the block to be converted is
* erased.
*/
static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int end)
{
struct onenand_chip *this = mtd->priv;
int i, ret;
int block;
struct mtd_oob_ops ops = {
.mode = MTD_OPS_PLACE_OOB,
.ooboffs = 0,
.ooblen = mtd->oobsize,
.datbuf = NULL,
.oobbuf = this->oob_buf,
};
loff_t addr;
printk(KERN_DEBUG "Check blocks from %d to %d\n", start, end);
for (block = start; block <= end; block++) {
addr = flexonenand_addr(this, block);
if (onenand_block_isbad_nolock(mtd, addr, 0))
continue;
/*
* Since main area write results in ECC write to spare,
* it is sufficient to check only ECC bytes for change.
*/
ret = onenand_read_oob_nolock(mtd, addr, &ops);
if (ret)
return ret;
for (i = 0; i < mtd->oobsize; i++)
if (this->oob_buf[i] != 0xff)
break;
if (i != mtd->oobsize) {
printk(KERN_WARNING "%s: Block %d not erased.\n",
__func__, block);
return 1;
}
}
return 0;
}
/*
* flexonenand_set_boundary - Writes the SLC boundary
*/
static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
int boundary, int lock)
{
struct onenand_chip *this = mtd->priv;
int ret, density, blksperdie, old, new, thisboundary;
loff_t addr;
/* Change only once for SDP Flex-OneNAND */
if (die && (!ONENAND_IS_DDP(this)))
return 0;
/* boundary value of -1 indicates no required change */
if (boundary < 0 || boundary == this->boundary[die])
return 0;
density = onenand_get_density(this->device_id);
blksperdie = ((16 << density) << 20) >> this->erase_shift;
blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
if (boundary >= blksperdie) {
printk(KERN_ERR "%s: Invalid boundary value. "
"Boundary not changed.\n", __func__);
return -EINVAL;
}
/* Check if converting blocks are erased */
old = this->boundary[die] + (die * this->density_mask);
new = boundary + (die * this->density_mask);
ret = flexonenand_check_blocks_erased(mtd, min(old, new) + 1, max(old, new));
if (ret) {
printk(KERN_ERR "%s: Please erase blocks "
"before boundary change\n", __func__);
return ret;
}
this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
this->wait(mtd, FL_SYNCING);
/* Check if the boundary is locked */
this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
this->wait(mtd, FL_READING);
thisboundary = this->read_word(this->base + ONENAND_DATARAM);
if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
printk(KERN_ERR "%s: boundary locked\n", __func__);
ret = 1;
goto out;
}
printk(KERN_INFO "Changing die %d boundary: %d%s\n",
die, boundary, lock ? "(Locked)" : "(Unlocked)");
addr = die ? this->diesize[0] : 0;
boundary &= FLEXONENAND_PI_MASK;
boundary |= lock ? 0 : (3 << FLEXONENAND_PI_UNLOCK_SHIFT);
this->command(mtd, ONENAND_CMD_ERASE, addr, 0);
ret = this->wait(mtd, FL_ERASING);
if (ret) {
printk(KERN_ERR "%s: Failed PI erase for Die %d\n",
__func__, die);
goto out;
}
this->write_word(boundary, this->base + ONENAND_DATARAM);
this->command(mtd, ONENAND_CMD_PROG, addr, 0);
ret = this->wait(mtd, FL_WRITING);
if (ret) {
printk(KERN_ERR "%s: Failed PI write for Die %d\n",
__func__, die);
goto out;
}
this->command(mtd, FLEXONENAND_CMD_PI_UPDATE, die, 0);
ret = this->wait(mtd, FL_WRITING);
out:
this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND);
this->wait(mtd, FL_RESETTING);
if (!ret)
/* Recalculate device size on boundary change */
flexonenand_get_size(mtd);
return ret;
}
/**
* onenand_chip_probe - [OneNAND Interface] The generic chip probe
* @mtd: MTD device structure
*
* OneNAND detection method:
* Compare the values from command with ones from register
*/
static int onenand_chip_probe(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
int bram_maf_id, bram_dev_id, maf_id, dev_id;
int syscfg;
/* Save system configuration 1 */
syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
/* Clear Sync. Burst Read mode to read BootRAM */
this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE), this->base + ONENAND_REG_SYS_CFG1);
/* Send the command for reading device ID from BootRAM */
this->write_word(ONENAND_CMD_READID, this->base + ONENAND_BOOTRAM);
/* Read manufacturer and device IDs from BootRAM */
bram_maf_id = this->read_word(this->base + ONENAND_BOOTRAM + 0x0);
bram_dev_id = this->read_word(this->base + ONENAND_BOOTRAM + 0x2);
/* Reset OneNAND to read default register values */
this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM);
/* Wait reset */
this->wait(mtd, FL_RESETTING);
/* Restore system configuration 1 */
this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
/* Check manufacturer ID */
if (onenand_check_maf(bram_maf_id))
return -ENXIO;
/* Read manufacturer and device IDs from Register */
maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
/* Check OneNAND device */
if (maf_id != bram_maf_id || dev_id != bram_dev_id)
return -ENXIO;
return 0;
}
/**
* onenand_probe - [OneNAND Interface] Probe the OneNAND device
* @mtd: MTD device structure
*/
static int onenand_probe(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
int dev_id, ver_id;
int density;
int ret;
ret = this->chip_probe(mtd);
if (ret)
return ret;
/* Device and version IDs from Register */
dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
/* Flash device information */
onenand_print_device_info(dev_id, ver_id);
this->device_id = dev_id;
this->version_id = ver_id;
/* Check OneNAND features */
onenand_check_features(mtd);
density = onenand_get_density(dev_id);
if (FLEXONENAND(this)) {
this->dies = ONENAND_IS_DDP(this) ? 2 : 1;
/* Maximum possible erase regions */
mtd->numeraseregions = this->dies << 1;
mtd->eraseregions =
kcalloc(this->dies << 1,
sizeof(struct mtd_erase_region_info),
GFP_KERNEL);
if (!mtd->eraseregions)
return -ENOMEM;
}
/*
* For Flex-OneNAND, chipsize represents maximum possible device size.
* mtd->size represents the actual device size.
*/
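/* Density N encodes a device size of 16 << N MiB */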
this->chipsize = (16 << density) << 20;
/* OneNAND page size & block size */
/* The data buffer size is equal to page size */
mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
/* We use the full BufferRAM */
if (ONENAND_IS_4KB_PAGE(this))
mtd->writesize <<= 1;
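/* The spare area is 1/32 of the main area: 64 bytes per 2KiB page */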
mtd->oobsize = mtd->writesize >> 5;
/* Pages per block are always 64 in OneNAND */
mtd->erasesize = mtd->writesize << 6;
/*
* Flex-OneNAND SLC area has 64 pages per block.
* Flex-OneNAND MLC area has 128 pages per block.
* Expose MLC erase size to find erase_shift and page_mask.
*/
if (FLEXONENAND(this))
mtd->erasesize <<= 1;
this->erase_shift = ffs(mtd->erasesize) - 1;
this->page_shift = ffs(mtd->writesize) - 1;
this->page_mask = (1 << (this->erase_shift - this->page_shift)) - 1;
/* Set density mask. It is used for DDP */
if (ONENAND_IS_DDP(this))
this->density_mask = this->chipsize >> (this->erase_shift + 1);
/* It's the real page size */
this->writesize = mtd->writesize;
/* REVISIT: Multichip handling */
if (FLEXONENAND(this))
flexonenand_get_size(mtd);
else
mtd->size = this->chipsize;
/*
* We emulate a 4KiB page and a 256KiB erase block size,
* but oobsize is still 64 bytes.
* This only takes effect when 2X program support is turned
* on; otherwise the condition is compile-time false and the
* block is optimized away.
*/
if (ONENAND_IS_2PLANE(this)) {
mtd->writesize <<= 1;
mtd->erasesize <<= 1;
}
return 0;
}
/**
* onenand_suspend - [MTD Interface] Suspend the OneNAND flash
* @mtd: MTD device structure
*/
static int onenand_suspend(struct mtd_info *mtd)
{
return onenand_get_device(mtd, FL_PM_SUSPENDED);
}
/**
* onenand_resume - [MTD Interface] Resume the OneNAND flash
* @mtd: MTD device structure
*/
static void onenand_resume(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
if (this->state == FL_PM_SUSPENDED)
onenand_release_device(mtd);
else
printk(KERN_ERR "%s: resume() called for the chip which is not "
"in suspended state\n", __func__);
}
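/*
 * Typical use from a glue/board driver (a minimal sketch; error
 * handling and resource cleanup omitted, names are illustrative):
 *
 *	this->base = ioremap(res->start, resource_size(res));
 *	mtd->priv = this;
 *	if (onenand_scan(mtd, 1))
 *		return -ENXIO;
 *	mtd_device_register(mtd, parts, nr_parts);
 */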
/**
* onenand_scan - [OneNAND Interface] Scan for the OneNAND device
* @mtd: MTD device structure
* @maxchips: Number of chips to scan for
*
* This fills out all the not initialized function pointers
* with the defaults.
* The flash ID is read and the mtd/chip structures are
* filled with the appropriate values.
*/
int onenand_scan(struct mtd_info *mtd, int maxchips)
{
int i, ret;
struct onenand_chip *this = mtd->priv;
if (!this->read_word)
this->read_word = onenand_readw;
if (!this->write_word)
this->write_word = onenand_writew;
if (!this->command)
this->command = onenand_command;
if (!this->wait)
onenand_setup_wait(mtd);
if (!this->bbt_wait)
this->bbt_wait = onenand_bbt_wait;
if (!this->unlock_all)
this->unlock_all = onenand_unlock_all;
if (!this->chip_probe)
this->chip_probe = onenand_chip_probe;
if (!this->read_bufferram)
this->read_bufferram = onenand_read_bufferram;
if (!this->write_bufferram)
this->write_bufferram = onenand_write_bufferram;
if (!this->block_markbad)
this->block_markbad = onenand_default_block_markbad;
if (!this->scan_bbt)
this->scan_bbt = onenand_default_bbt;
if (onenand_probe(mtd))
return -ENXIO;
/* Set Sync. Burst Read after probing */
if (this->mmcontrol) {
printk(KERN_INFO "OneNAND Sync. Burst Read support\n");
this->read_bufferram = onenand_sync_read_bufferram;
}
/* Allocate buffers, if necessary */
if (!this->page_buf) {
this->page_buf = kzalloc(mtd->writesize, GFP_KERNEL);
if (!this->page_buf)
return -ENOMEM;
#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
this->verify_buf = kzalloc(mtd->writesize, GFP_KERNEL);
if (!this->verify_buf) {
kfree(this->page_buf);
return -ENOMEM;
}
#endif
this->options |= ONENAND_PAGEBUF_ALLOC;
}
if (!this->oob_buf) {
this->oob_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
if (!this->oob_buf) {
if (this->options & ONENAND_PAGEBUF_ALLOC) {
this->options &= ~ONENAND_PAGEBUF_ALLOC;
#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
kfree(this->verify_buf);
#endif
kfree(this->page_buf);
}
return -ENOMEM;
}
this->options |= ONENAND_OOBBUF_ALLOC;
}
this->state = FL_READY;
init_waitqueue_head(&this->wq);
spin_lock_init(&this->chip_lock);
/*
* Allow subpage writes up to oobsize.
*/
switch (mtd->oobsize) {
case 128:
if (FLEXONENAND(this)) {
mtd_set_ooblayout(mtd, &flexonenand_ooblayout_ops);
mtd->subpage_sft = 0;
} else {
mtd_set_ooblayout(mtd, &onenand_oob_128_ooblayout_ops);
mtd->subpage_sft = 2;
}
if (ONENAND_IS_NOP_1(this))
mtd->subpage_sft = 0;
break;
case 64:
mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops);
mtd->subpage_sft = 2;
break;
case 32:
mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops);
mtd->subpage_sft = 1;
break;
default:
printk(KERN_WARNING "%s: No OOB scheme defined for oobsize %d\n",
__func__, mtd->oobsize);
mtd->subpage_sft = 0;
/* To prevent kernel oops */
mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops);
break;
}
this->subpagesize = mtd->writesize >> mtd->subpage_sft;
/*
* The number of bytes available for a client to place data into
* the out of band area
*/
ret = mtd_ooblayout_count_freebytes(mtd);
if (ret < 0)
ret = 0;
mtd->oobavail = ret;
mtd->ecc_strength = 1;
/* Fill in remaining MTD driver data */
mtd->type = ONENAND_IS_MLC(this) ? MTD_MLCNANDFLASH : MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
mtd->_erase = onenand_erase;
mtd->_point = NULL;
mtd->_unpoint = NULL;
mtd->_read_oob = onenand_read_oob;
mtd->_write_oob = onenand_write_oob;
mtd->_panic_write = onenand_panic_write;
#ifdef CONFIG_MTD_ONENAND_OTP
mtd->_get_fact_prot_info = onenand_get_fact_prot_info;
mtd->_read_fact_prot_reg = onenand_read_fact_prot_reg;
mtd->_get_user_prot_info = onenand_get_user_prot_info;
mtd->_read_user_prot_reg = onenand_read_user_prot_reg;
mtd->_write_user_prot_reg = onenand_write_user_prot_reg;
mtd->_lock_user_prot_reg = onenand_lock_user_prot_reg;
#endif
mtd->_sync = onenand_sync;
mtd->_lock = onenand_lock;
mtd->_unlock = onenand_unlock;
mtd->_suspend = onenand_suspend;
mtd->_resume = onenand_resume;
mtd->_block_isbad = onenand_block_isbad;
mtd->_block_markbad = onenand_block_markbad;
mtd->owner = THIS_MODULE;
mtd->writebufsize = mtd->writesize;
/* Unlock the whole device */
if (!(this->options & ONENAND_SKIP_INITIAL_UNLOCKING))
this->unlock_all(mtd);
/* Set the bad block marker position */
this->badblockpos = ONENAND_BADBLOCK_POS;
ret = this->scan_bbt(mtd);
if ((!FLEXONENAND(this)) || ret)
return ret;
/* Change Flex-OneNAND boundaries if required */
for (i = 0; i < MAX_DIES; i++)
flexonenand_set_boundary(mtd, i, flex_bdry[2 * i],
flex_bdry[(2 * i) + 1]);
return 0;
}
/**
* onenand_release - [OneNAND Interface] Free resources held by the OneNAND device
* @mtd: MTD device structure
*/
void onenand_release(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
/* Deregister partitions */
mtd_device_unregister(mtd);
/* Free bad block table memory, if allocated */
if (this->bbm) {
struct bbm_info *bbm = this->bbm;
kfree(bbm->bbt);
kfree(this->bbm);
}
/* Buffers allocated by onenand_scan */
if (this->options & ONENAND_PAGEBUF_ALLOC) {
kfree(this->page_buf);
#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
kfree(this->verify_buf);
#endif
}
if (this->options & ONENAND_OOBBUF_ALLOC)
kfree(this->oob_buf);
kfree(mtd->eraseregions);
}
EXPORT_SYMBOL_GPL(onenand_scan);
EXPORT_SYMBOL_GPL(onenand_release);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyungmin Park <[email protected]>");
MODULE_DESCRIPTION("Generic OneNAND flash driver code");
| linux-master | drivers/mtd/nand/onenand/onenand_base.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* OneNAND driver for OMAP2 / OMAP3
*
* Copyright © 2005-2006 Nokia Corporation
*
* Author: Jarkko Lavinen <[email protected]> and Juha Yrjölä
* IRQ and DMA support written by Timo Teras
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/omap-gpmc.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
#include <asm/mach/flash.h>
#define DRIVER_NAME "omap2-onenand"
#define ONENAND_BUFRAM_SIZE (1024 * 5)
struct omap2_onenand {
struct platform_device *pdev;
int gpmc_cs;
unsigned long phys_base;
struct gpio_desc *int_gpiod;
struct mtd_info mtd;
struct onenand_chip onenand;
struct completion irq_done;
struct completion dma_done;
struct dma_chan *dma_chan;
};
static void omap2_onenand_dma_complete_func(void *completion)
{
complete(completion);
}
static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
struct omap2_onenand *c = dev_id;
complete(&c->irq_done);
return IRQ_HANDLED;
}
static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
return readw(c->onenand.base + reg);
}
static inline void write_reg(struct omap2_onenand *c, unsigned short value,
int reg)
{
writew(value, c->onenand.base + reg);
}
static int omap2_onenand_set_cfg(struct omap2_onenand *c,
bool sr, bool sw,
int latency, int burst_len)
{
unsigned short reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
reg |= latency << ONENAND_SYS_CFG1_BRL_SHIFT;
switch (burst_len) {
case 0: /* continuous */
break;
case 4:
reg |= ONENAND_SYS_CFG1_BL_4;
break;
case 8:
reg |= ONENAND_SYS_CFG1_BL_8;
break;
case 16:
reg |= ONENAND_SYS_CFG1_BL_16;
break;
case 32:
reg |= ONENAND_SYS_CFG1_BL_32;
break;
default:
return -EINVAL;
}
if (latency > 5)
reg |= ONENAND_SYS_CFG1_HF;
if (latency > 7)
reg |= ONENAND_SYS_CFG1_VHF;
if (sr)
reg |= ONENAND_SYS_CFG1_SYNC_READ;
if (sw)
reg |= ONENAND_SYS_CFG1_SYNC_WRITE;
write_reg(c, reg, ONENAND_REG_SYS_CFG1);
return 0;
}
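/*
 * Bits 7:4 of the OneNAND version ID encode the maximum synchronous
 * clock frequency; translate that to MHz (-EINVAL if unknown).
 */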
static int omap2_onenand_get_freq(int ver)
{
switch ((ver >> 4) & 0xf) {
case 0:
return 40;
case 1:
return 54;
case 2:
return 66;
case 3:
return 83;
case 4:
return 104;
}
return -EINVAL;
}
static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
msg, state, ctrl, intr);
}
static void wait_warn(char *msg, int state, unsigned int ctrl,
unsigned int intr)
{
printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
"intr 0x%04x\n", msg, state, ctrl, intr);
}
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
struct onenand_chip *this = mtd->priv;
unsigned int intr = 0;
unsigned int ctrl, ctrl_mask;
unsigned long timeout;
u32 syscfg;
if (state == FL_RESETTING || state == FL_PREPARING_ERASE ||
state == FL_VERIFYING_ERASE) {
int i = 21;
unsigned int intr_flags = ONENAND_INT_MASTER;
switch (state) {
case FL_RESETTING:
intr_flags |= ONENAND_INT_RESET;
break;
case FL_PREPARING_ERASE:
intr_flags |= ONENAND_INT_ERASE;
break;
case FL_VERIFYING_ERASE:
i = 101;
break;
}
while (--i) {
udelay(1);
intr = read_reg(c, ONENAND_REG_INTERRUPT);
if (intr & ONENAND_INT_MASTER)
break;
}
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
if (ctrl & ONENAND_CTRL_ERROR) {
wait_err("controller error", state, ctrl, intr);
return -EIO;
}
if ((intr & intr_flags) == intr_flags)
return 0;
/* Continue in wait for interrupt branch */
}
if (state != FL_READING) {
int result;
/* Turn interrupts on */
syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
syscfg |= ONENAND_SYS_CFG1_IOBE;
write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
/* Add a delay to let GPIO settle */
syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
}
reinit_completion(&c->irq_done);
result = gpiod_get_value(c->int_gpiod);
if (result < 0) {
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
intr = read_reg(c, ONENAND_REG_INTERRUPT);
wait_err("gpio error", state, ctrl, intr);
return result;
} else if (result == 0) {
int retry_cnt = 0;
retry:
if (!wait_for_completion_io_timeout(&c->irq_done,
msecs_to_jiffies(20))) {
/* Timeout after 20ms */
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
if (ctrl & ONENAND_CTRL_ONGO &&
!this->ongoing) {
/*
* The operation seems to be still going
* so give it some more time.
*/
retry_cnt += 1;
if (retry_cnt < 3)
goto retry;
intr = read_reg(c,
ONENAND_REG_INTERRUPT);
wait_err("timeout", state, ctrl, intr);
return -EIO;
}
intr = read_reg(c, ONENAND_REG_INTERRUPT);
if ((intr & ONENAND_INT_MASTER) == 0)
wait_warn("timeout", state, ctrl, intr);
}
}
} else {
int retry_cnt = 0;
/* Turn interrupts off */
syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
syscfg &= ~ONENAND_SYS_CFG1_IOBE;
write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
timeout = jiffies + msecs_to_jiffies(20);
while (1) {
if (time_before(jiffies, timeout)) {
intr = read_reg(c, ONENAND_REG_INTERRUPT);
if (intr & ONENAND_INT_MASTER)
break;
} else {
/* Timeout after 20ms */
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
if (ctrl & ONENAND_CTRL_ONGO) {
/*
* The operation seems to be still going
* so give it some more time.
*/
retry_cnt += 1;
if (retry_cnt < 3) {
timeout = jiffies +
msecs_to_jiffies(20);
continue;
}
}
break;
}
}
}
intr = read_reg(c, ONENAND_REG_INTERRUPT);
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
if (intr & ONENAND_INT_READ) {
int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);
if (ecc) {
unsigned int addr1, addr8;
addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
if (ecc & ONENAND_ECC_2BIT_ALL) {
printk(KERN_ERR "onenand_wait: ECC error = "
"0x%04x, addr1 %#x, addr8 %#x\n",
ecc, addr1, addr8);
mtd->ecc_stats.failed++;
return -EBADMSG;
} else if (ecc & ONENAND_ECC_1BIT_ALL) {
printk(KERN_NOTICE "onenand_wait: correctable "
"ECC error = 0x%04x, addr1 %#x, "
"addr8 %#x\n", ecc, addr1, addr8);
mtd->ecc_stats.corrected++;
}
}
} else if (state == FL_READING) {
wait_err("timeout", state, ctrl, intr);
return -EIO;
}
if (ctrl & ONENAND_CTRL_ERROR) {
wait_err("controller error", state, ctrl, intr);
if (ctrl & ONENAND_CTRL_LOCK)
printk(KERN_ERR "onenand_wait: "
"Device is write protected!!!\n");
return -EIO;
}
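/*
 * Warn on any unexpected controller status bits: bits 5, 6 and 8
 * are always tolerated, and bit 15 is additionally tolerated while
 * an operation is still marked as ongoing.
 */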
ctrl_mask = 0xFE9F;
if (this->ongoing)
ctrl_mask &= ~0x8000;
if (ctrl & ctrl_mask)
wait_warn("unexpected controller status", state, ctrl, intr);
return 0;
}
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
struct onenand_chip *this = mtd->priv;
if (ONENAND_CURRENT_BUFFERRAM(this)) {
if (area == ONENAND_DATARAM)
return this->writesize;
if (area == ONENAND_SPARERAM)
return mtd->oobsize;
}
return 0;
}
static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
dma_addr_t src, dma_addr_t dst,
size_t count)
{
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count,
DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
return -EIO;
}
reinit_completion(&c->dma_done);
tx->callback = omap2_onenand_dma_complete_func;
tx->callback_param = &c->dma_done;
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
dev_err(&c->pdev->dev, "Failed to do DMA tx_submit\n");
return -EIO;
}
dma_async_issue_pending(c->dma_chan);
if (!wait_for_completion_io_timeout(&c->dma_done,
msecs_to_jiffies(20))) {
dmaengine_terminate_sync(c->dma_chan);
return -ETIMEDOUT;
}
return 0;
}
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset,
size_t count)
{
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
struct onenand_chip *this = mtd->priv;
struct device *dev = &c->pdev->dev;
void *buf = (void *)buffer;
dma_addr_t dma_src, dma_dst;
int bram_offset, err;
size_t xtra;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
/*
* If the buffer address is not DMA-able, len is not long enough to
* make DMA transfers profitable or if invoked from panic_write()
* fallback to PIO mode.
*/
if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
count < 384 || mtd->oops_panic_write)
goto out_copy;
xtra = count & 3;
if (xtra) {
count -= xtra;
memcpy(buf + count, this->base + bram_offset + count, xtra);
}
dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
dma_src = c->phys_base + bram_offset;
if (dma_mapping_error(dev, dma_dst)) {
dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
goto out_copy;
}
err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
if (!err)
return 0;
dev_err(dev, "timeout waiting for DMA\n");
out_copy:
memcpy(buf, this->base + bram_offset, count);
return 0;
}
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
const unsigned char *buffer,
int offset, size_t count)
{
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
struct onenand_chip *this = mtd->priv;
struct device *dev = &c->pdev->dev;
void *buf = (void *)buffer;
dma_addr_t dma_src, dma_dst;
int bram_offset, err;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
/*
* If the buffer address is not DMA-able, len is not long enough to
* make DMA transfers profitable or if invoked from panic_write()
* fallback to PIO mode.
*/
if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
count < 384 || mtd->oops_panic_write)
goto out_copy;
dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
dma_dst = c->phys_base + bram_offset;
if (dma_mapping_error(dev, dma_src)) {
dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
goto out_copy;
}
err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(dev, dma_src, count, DMA_TO_DEVICE);
if (!err)
return 0;
dev_err(dev, "timeout waiting for DMA\n");
out_copy:
memcpy(this->base + bram_offset, buf, count);
return 0;
}
static void omap2_onenand_shutdown(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
/* With certain content in the buffer RAM, the OMAP boot ROM code
* can recognize the flash chip incorrectly. Zero it out before
* soft reset.
*/
memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
static int omap2_onenand_probe(struct platform_device *pdev)
{
u32 val;
dma_cap_mask_t mask;
int freq, latency, r;
struct resource *res;
struct omap2_onenand *c;
struct gpmc_onenand_info info;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
r = of_property_read_u32(np, "reg", &val);
if (r) {
dev_err(dev, "reg not found in DT\n");
return r;
}
c = devm_kzalloc(dev, sizeof(struct omap2_onenand), GFP_KERNEL);
if (!c)
return -ENOMEM;
init_completion(&c->irq_done);
init_completion(&c->dma_done);
c->gpmc_cs = val;
c->onenand.base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(c->onenand.base))
return PTR_ERR(c->onenand.base);
c->phys_base = res->start;
c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
if (IS_ERR(c->int_gpiod)) {
/* May be -EPROBE_DEFER; probing is then retried later */
return dev_err_probe(dev, PTR_ERR(c->int_gpiod), "error getting gpio\n");
}
if (c->int_gpiod) {
r = devm_request_irq(dev, gpiod_to_irq(c->int_gpiod),
omap2_onenand_interrupt,
IRQF_TRIGGER_RISING, "onenand", c);
if (r)
return r;
c->onenand.wait = omap2_onenand_wait;
}
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
c->dma_chan = dma_request_channel(mask, NULL, NULL);
if (c->dma_chan) {
c->onenand.read_bufferram = omap2_onenand_read_bufferram;
c->onenand.write_bufferram = omap2_onenand_write_bufferram;
}
c->pdev = pdev;
c->mtd.priv = &c->onenand;
c->mtd.dev.parent = dev;
mtd_set_of_node(&c->mtd, dev->of_node);
dev_info(dev, "initializing on CS%d (0x%08lx), va %p, %s mode\n",
c->gpmc_cs, c->phys_base, c->onenand.base,
c->dma_chan ? "DMA" : "PIO");
r = onenand_scan(&c->mtd, 1);
if (r < 0)
goto err_release_dma;
freq = omap2_onenand_get_freq(c->onenand.version_id);
if (freq > 0) {
switch (freq) {
case 104:
latency = 7;
break;
case 83:
latency = 6;
break;
case 66:
latency = 5;
break;
case 54:
latency = 4;
break;
default: /* 40 MHz or lower */
latency = 3;
break;
}
r = gpmc_omap_onenand_set_timings(dev, c->gpmc_cs,
freq, latency, &info);
if (r)
goto err_release_onenand;
r = omap2_onenand_set_cfg(c, info.sync_read, info.sync_write,
latency, info.burst_len);
if (r)
goto err_release_onenand;
if (info.sync_read || info.sync_write)
dev_info(dev, "optimized timings for %d MHz\n", freq);
}
r = mtd_device_register(&c->mtd, NULL, 0);
if (r)
goto err_release_onenand;
platform_set_drvdata(pdev, c);
return 0;
err_release_onenand:
onenand_release(&c->mtd);
err_release_dma:
if (c->dma_chan)
dma_release_channel(c->dma_chan);
return r;
}
static void omap2_onenand_remove(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
onenand_release(&c->mtd);
if (c->dma_chan)
dma_release_channel(c->dma_chan);
omap2_onenand_shutdown(pdev);
}
static const struct of_device_id omap2_onenand_id_table[] = {
{ .compatible = "ti,omap2-onenand", },
{},
};
MODULE_DEVICE_TABLE(of, omap2_onenand_id_table);
static struct platform_driver omap2_onenand_driver = {
.probe = omap2_onenand_probe,
.remove_new = omap2_onenand_remove,
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
.of_match_table = omap2_onenand_id_table,
},
};
module_platform_driver(omap2_onenand_driver);
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <[email protected]>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
| linux-master | drivers/mtd/nand/onenand/onenand_omap2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2005 Samsung Electronics
* Kyungmin Park <[email protected]>
*
* Overview:
* This is a device driver for the OneNAND flash for generic boards.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
/*
* Note: Driver name and platform data format have been updated!
*
* This version of the driver is named "onenand-flash" and takes struct
* onenand_platform_data as platform data. The old ARM-specific version
* with the name "onenand" used to take struct flash_platform_data.
*/
#define DRIVER_NAME "onenand-flash"
struct onenand_info {
struct mtd_info mtd;
struct onenand_chip onenand;
};
static int generic_onenand_probe(struct platform_device *pdev)
{
struct onenand_info *info;
struct onenand_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *res = pdev->resource;
unsigned long size = resource_size(res);
int err;
info = kzalloc(sizeof(struct onenand_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
if (!request_mem_region(res->start, size, dev_name(&pdev->dev))) {
err = -EBUSY;
goto out_free_info;
}
info->onenand.base = ioremap(res->start, size);
if (!info->onenand.base) {
err = -ENOMEM;
goto out_release_mem_region;
}
info->onenand.mmcontrol = pdata ? pdata->mmcontrol : NULL;
err = platform_get_irq(pdev, 0);
if (err < 0)
goto out_iounmap;
info->onenand.irq = err;
info->mtd.dev.parent = &pdev->dev;
info->mtd.priv = &info->onenand;
if (onenand_scan(&info->mtd, 1)) {
err = -ENXIO;
goto out_iounmap;
}
err = mtd_device_register(&info->mtd, pdata ? pdata->parts : NULL,
pdata ? pdata->nr_parts : 0);
platform_set_drvdata(pdev, info);
return 0;
out_iounmap:
iounmap(info->onenand.base);
out_release_mem_region:
release_mem_region(res->start, size);
out_free_info:
kfree(info);
return err;
}
static void generic_onenand_remove(struct platform_device *pdev)
{
struct onenand_info *info = platform_get_drvdata(pdev);
struct resource *res = pdev->resource;
unsigned long size = resource_size(res);
if (info) {
onenand_release(&info->mtd);
release_mem_region(res->start, size);
iounmap(info->onenand.base);
kfree(info);
}
}
static struct platform_driver generic_onenand_driver = {
.driver = {
.name = DRIVER_NAME,
},
.probe = generic_onenand_probe,
.remove_new = generic_onenand_remove,
};
module_platform_driver(generic_onenand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyungmin Park <[email protected]>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on generic boards");
MODULE_ALIAS("platform:" DRIVER_NAME);
| linux-master | drivers/mtd/nand/onenand/generic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016-2017 Micron Technology, Inc.
*
* Authors:
* Peter Pan <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
#define SPINAND_MFR_MICRON 0x2c
#define MICRON_STATUS_ECC_MASK GENMASK(7, 4)
#define MICRON_STATUS_ECC_NO_BITFLIPS (0 << 4)
#define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4)
#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
#define MICRON_STATUS_ECC_7TO8_BITFLIPS (5 << 4)
#define MICRON_CFG_CR BIT(0)
/*
* As per datasheet, die selection is done by the 6th bit of Die
* Select Register (Address 0xD0).
*/
#define MICRON_DIE_SELECT_REG 0xD0
#define MICRON_SELECT_DIE(x) ((x) << 6)
static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(x4_write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(x4_update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
/* Micron MT29F2G01AAAED Device */
static SPINAND_OP_VARIANTS(x4_read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(x1_write_cache_variants,
SPINAND_PROG_LOAD(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(x1_update_cache_variants,
SPINAND_PROG_LOAD(false, 0, NULL, 0));
static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
region->offset = mtd->oobsize / 2;
region->length = mtd->oobsize / 2;
return 0;
}
static int micron_8_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
/* Reserve 2 bytes for the BBM. */
region->offset = 2;
region->length = (mtd->oobsize / 2) - 2;
return 0;
}
static const struct mtd_ooblayout_ops micron_8_ooblayout = {
.ecc = micron_8_ooblayout_ecc,
.free = micron_8_ooblayout_free,
};
static int micron_4_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
if (section >= spinand->base.memorg.pagesize /
mtd->ecc_step_size)
return -ERANGE;
region->offset = (section * 16) + 8;
region->length = 8;
return 0;
}
static int micron_4_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
if (section >= spinand->base.memorg.pagesize /
mtd->ecc_step_size)
return -ERANGE;
if (section) {
region->offset = 16 * section;
region->length = 8;
} else {
/* section 0 has two bytes reserved for the BBM */
region->offset = 2;
region->length = 6;
}
return 0;
}
static const struct mtd_ooblayout_ops micron_4_ooblayout = {
.ecc = micron_4_ooblayout_ecc,
.free = micron_4_ooblayout_free,
};
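/*
 * Standalone sketch (plain arithmetic, not driver code): the micron_4
 * callbacks above carve the OOB into one 16-byte chunk per 512-byte ECC
 * step. For the MT29F2G01AAAED (2048-byte pages, 64-byte OOB) that gives
 * four sections, with the BBM eating the first two free bytes:
 */
#include <stdio.h>
int main(void)
{
	int pagesize = 2048, step = 512, section;
	for (section = 0; section < pagesize / step; section++) {
		int free_off = section ? 16 * section : 2;
		int free_len = section ? 8 : 6;
		printf("section %d: free %2d+%d, ecc %2d+8\n",
		       section, free_off, free_len, 16 * section + 8);
	}
	return 0;
}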
static int micron_select_target(struct spinand_device *spinand,
unsigned int target)
{
struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG,
spinand->scratchbuf);
if (target > 1)
return -EINVAL;
*spinand->scratchbuf = MICRON_SELECT_DIE(target);
return spi_mem_exec_op(spinand->spimem, &op);
}
static int micron_8_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
switch (status & MICRON_STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
return 0;
case STATUS_ECC_UNCOR_ERROR:
return -EBADMSG;
case MICRON_STATUS_ECC_1TO3_BITFLIPS:
return 3;
case MICRON_STATUS_ECC_4TO6_BITFLIPS:
return 6;
case MICRON_STATUS_ECC_7TO8_BITFLIPS:
return 8;
default:
break;
}
return -EINVAL;
}
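/*
 * Standalone sketch mirroring micron_8_ecc_get_status() above: the ECC
 * status field sits in bits 7:4 (MICRON_STATUS_ECC_MASK) and the driver
 * always reports the upper bound of each bitflip range. The helper name
 * is illustrative; negative values stand in for the driver's errnos.
 */
#include <stdio.h>
static int micron_8_decode(unsigned int status)
{
	switch (status & (0xf << 4)) {
	case 0 << 4: return 0;		/* no bitflips */
	case 2 << 4: return -1;		/* uncorrectable (driver: -EBADMSG) */
	case 1 << 4: return 3;		/* 1-3 bitflips, report the maximum */
	case 3 << 4: return 6;		/* 4-6 bitflips */
	case 5 << 4: return 8;		/* 7-8 bitflips */
	default:     return -2;		/* reserved (driver: -EINVAL) */
	}
}
int main(void)
{
	unsigned int field;
	for (field = 0; field <= 5; field++)
		printf("field %u -> %d\n", field, micron_8_decode(field << 4));
	return 0;
}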
static const struct spinand_info micron_spinand_table[] = {
/* M79A 2Gb 3.3V */
SPINAND_INFO("MT29F2G01ABAGD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
&x4_write_cache_variants,
&x4_update_cache_variants),
0,
		     SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status)),
/* M79A 2Gb 1.8V */
SPINAND_INFO("MT29F2G01ABBGD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
&x4_write_cache_variants,
&x4_update_cache_variants),
0,
		     SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status)),
/* M78A 1Gb 3.3V */
SPINAND_INFO("MT29F1G01ABAFD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
&x4_write_cache_variants,
&x4_update_cache_variants),
0,
		     SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status)),
/* M78A 1Gb 1.8V */
SPINAND_INFO("MT29F1G01ABAFD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
&x4_write_cache_variants,
&x4_update_cache_variants),
0,
		     SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status)),
/* M79A 4Gb 3.3V */
SPINAND_INFO("MT29F4G01ADAGD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x36),
NAND_MEMORG(1, 2048, 128, 64, 2048, 80, 2, 1, 2),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
&x4_write_cache_variants,
&x4_update_cache_variants),
0,
		     SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status),
SPINAND_SELECT_TARGET(micron_select_target)),
/* M70A 4Gb 3.3V */
SPINAND_INFO("MT29F4G01ABAFD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x34),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
&x4_write_cache_variants,
&x4_update_cache_variants),
SPINAND_HAS_CR_FEAT_BIT,
		     SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status)),
/* M70A 4Gb 1.8V */
SPINAND_INFO("MT29F4G01ABBFD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
&x4_write_cache_variants,
&x4_update_cache_variants),
SPINAND_HAS_CR_FEAT_BIT,
		     SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status)),
/* M70A 8Gb 3.3V */
SPINAND_INFO("MT29F8G01ADAFD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x46),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
&x4_write_cache_variants,
&x4_update_cache_variants),
SPINAND_HAS_CR_FEAT_BIT,
		     SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status),
SPINAND_SELECT_TARGET(micron_select_target)),
/* M70A 8Gb 1.8V */
SPINAND_INFO("MT29F8G01ADBFD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x47),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
&x4_write_cache_variants,
&x4_update_cache_variants),
SPINAND_HAS_CR_FEAT_BIT,
		     SPINAND_ECCINFO(&micron_8_ooblayout,
micron_8_ecc_get_status),
SPINAND_SELECT_TARGET(micron_select_target)),
/* M69A 2Gb 3.3V */
SPINAND_INFO("MT29F2G01AAAED",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x9F),
NAND_MEMORG(1, 2048, 64, 64, 2048, 80, 2, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&x4_read_cache_variants,
&x1_write_cache_variants,
&x1_update_cache_variants),
0,
		     SPINAND_ECCINFO(&micron_4_ooblayout, NULL)),
};
static int micron_spinand_init(struct spinand_device *spinand)
{
/*
	 * The M70A device series enables the Continuous Read feature at power-up,
* which is not supported. Disable this bit to avoid any possible
* failure.
*/
if (spinand->flags & SPINAND_HAS_CR_FEAT_BIT)
return spinand_upd_cfg(spinand, MICRON_CFG_CR, 0);
return 0;
}
static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = {
.init = micron_spinand_init,
};
const struct spinand_manufacturer micron_spinand_manufacturer = {
.id = SPINAND_MFR_MICRON,
.name = "Micron",
.chips = micron_spinand_table,
.nchips = ARRAY_SIZE(micron_spinand_table),
	.ops = &micron_spinand_manuf_ops,
};
| linux-master | drivers/mtd/nand/spi/micron.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author:
* Chuanhong Guo <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
#define SPINAND_MFR_GIGADEVICE 0xC8
#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS (1 << 4)
#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS (3 << 4)
#define GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS (1 << 4)
#define GD5FXGQ5XE_STATUS_ECC_4_BITFLIPS (3 << 4)
#define GD5FXGQXXEXXG_REG_STATUS2 0xf0
#define GD5FXGQ4UXFXXG_STATUS_ECC_MASK (7 << 4)
#define GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS (0 << 4)
#define GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS (1 << 4)
#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_f,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_1gq5,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_2gq5,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 3)
return -ERANGE;
region->offset = (16 * section) + 8;
region->length = 8;
return 0;
}
static int gd5fxgq4xa_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 3)
return -ERANGE;
if (section) {
region->offset = 16 * section;
region->length = 8;
} else {
/* section 0 has one byte reserved for bad block mark */
region->offset = 1;
region->length = 7;
}
return 0;
}
static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = {
.ecc = gd5fxgq4xa_ooblayout_ecc,
.free = gd5fxgq4xa_ooblayout_free,
};
static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
return 0;
case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
		/* 1-7 bits are flipped. Return the maximum. */
return 7;
case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
return 8;
case STATUS_ECC_UNCOR_ERROR:
return -EBADMSG;
default:
break;
}
return -EINVAL;
}
static int gd5fxgqx_variant2_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
region->offset = 64;
region->length = 64;
return 0;
}
static int gd5fxgqx_variant2_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
	/* Reserve 1 byte for the BBM. */
region->offset = 1;
region->length = 63;
return 0;
}
/* Valid for Q4/Q5 and Q6 (untested) devices */
static const struct mtd_ooblayout_ops gd5fxgqx_variant2_ooblayout = {
.ecc = gd5fxgqx_variant2_ooblayout_ecc,
.free = gd5fxgqx_variant2_ooblayout_free,
};
static int gd5fxgq4xc_ooblayout_256_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section)
return -ERANGE;
oobregion->offset = 128;
oobregion->length = 128;
return 0;
}
static int gd5fxgq4xc_ooblayout_256_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section)
return -ERANGE;
oobregion->offset = 1;
oobregion->length = 127;
return 0;
}
static const struct mtd_ooblayout_ops gd5fxgq4xc_oob_256_ops = {
.ecc = gd5fxgq4xc_ooblayout_256_ecc,
.free = gd5fxgq4xc_ooblayout_256_free,
};
static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
u8 status2;
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
&status2);
int ret;
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
return 0;
case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
/*
		 * Read the status2 register to determine a finer-grained
		 * bit error status.
*/
ret = spi_mem_exec_op(spinand->spimem, &op);
if (ret)
return ret;
/*
		 * 4 ... 7 bits are flipped (1..4 can't be detected, so
		 * report the maximum of 4 in this case)
*/
/* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
return ((status & STATUS_ECC_MASK) >> 2) |
((status2 & STATUS_ECC_MASK) >> 4);
case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
return 8;
case STATUS_ECC_UNCOR_ERROR:
return -EBADMSG;
default:
break;
}
return -EINVAL;
}
static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
u8 status2;
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
&status2);
int ret;
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
return 0;
case GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS:
/*
		 * Read the status2 register to determine a finer-grained
		 * bit error status.
*/
ret = spi_mem_exec_op(spinand->spimem, &op);
if (ret)
return ret;
/*
* 1 ... 4 bits are flipped (and corrected)
*/
/* bits sorted this way (1...0): ECCSE1, ECCSE0 */
return ((status2 & STATUS_ECC_MASK) >> 4) + 1;
case STATUS_ECC_UNCOR_ERROR:
return -EBADMSG;
default:
break;
}
return -EINVAL;
}
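/*
 * Standalone sketch of the status/status2 refinement used by the two
 * callbacks above. Both registers carry a 2-bit field in bits 5:4
 * (STATUS_ECC_MASK). On the Q4 "UE" parts, ECCS (status) and ECCSE
 * (status2) are concatenated into one nibble, so ECCS = 1 with
 * ECCSE = 0..3 yields 4..7 corrected bitflips; the Q5 parts instead
 * report ECCSE + 1, i.e. 1..4. Helper names are illustrative.
 */
#include <stdio.h>
static int q4_bitflips(unsigned int status, unsigned int status2)
{
	return ((status & 0x30) >> 2) | ((status2 & 0x30) >> 4);
}
static int q5_bitflips(unsigned int status2)
{
	return ((status2 & 0x30) >> 4) + 1;
}
int main(void)
{
	unsigned int eccse;
	for (eccse = 0; eccse < 4; eccse++)
		printf("ECCSE=%u: Q4 -> %d, Q5 -> %d\n", eccse,
		       q4_bitflips(1 << 4, eccse << 4), q5_bitflips(eccse << 4));
	return 0;
}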
static int gd5fxgq4ufxxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
switch (status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) {
case GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS:
return 0;
case GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS:
return 3;
case GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR:
return -EBADMSG;
default: /* (2 << 4) through (6 << 4) are 4-8 corrected errors */
return ((status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) >> 4) + 2;
}
}
static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_INFO("GD5F1GQ4xA",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf1),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
SPINAND_INFO("GD5F2GQ4xA",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf2),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
SPINAND_INFO("GD5F4GQ4xA",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xf4),
NAND_MEMORG(1, 2048, 64, 64, 4096, 80, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
SPINAND_INFO("GD5F4GQ4RC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xa4, 0x68),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgq4xc_oob_256_ops,
gd5fxgq4ufxxg_ecc_get_status)),
SPINAND_INFO("GD5F4GQ4UC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb4, 0x68),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgq4xc_oob_256_ops,
gd5fxgq4ufxxg_ecc_get_status)),
SPINAND_INFO("GD5F1GQ4UExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd1),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
SPINAND_INFO("GD5F1GQ4RExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xc1),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
SPINAND_INFO("GD5F2GQ4UExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd2),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
SPINAND_INFO("GD5F2GQ4RExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xc2),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
SPINAND_INFO("GD5F1GQ4UFxxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb1, 0x48),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4ufxxg_ecc_get_status)),
SPINAND_INFO("GD5F1GQ5UExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x51),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq5xexxg_ecc_get_status)),
SPINAND_INFO("GD5F1GQ5RExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x41),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq5xexxg_ecc_get_status)),
SPINAND_INFO("GD5F2GQ5UExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x52),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq5xexxg_ecc_get_status)),
SPINAND_INFO("GD5F2GQ5RExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x42),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq5xexxg_ecc_get_status)),
SPINAND_INFO("GD5F4GQ6UExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x55),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 2, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq5xexxg_ecc_get_status)),
SPINAND_INFO("GD5F4GQ6RExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x45),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 2, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq5xexxg_ecc_get_status)),
SPINAND_INFO("GD5F1GM7UExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x91),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
SPINAND_INFO("GD5F1GM7RExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x81),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
SPINAND_INFO("GD5F2GM7UExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
SPINAND_INFO("GD5F2GM7RExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x82),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
SPINAND_INFO("GD5F4GM8UExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x95),
NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
SPINAND_INFO("GD5F4GM8RExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x85),
NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
SPINAND_INFO("GD5F2GQ5xExxH",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_2gq5,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
SPINAND_INFO("GD5F1GQ5RExxH",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x21),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
SPINAND_INFO("GD5F1GQ4RExxH",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xc9),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
};
static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
};
const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
.id = SPINAND_MFR_GIGADEVICE,
.name = "GigaDevice",
.chips = gigadevice_spinand_table,
.nchips = ARRAY_SIZE(gigadevice_spinand_table),
.ops = &gigadevice_spinand_manuf_ops,
};
| linux-master | drivers/mtd/nand/spi/gigadevice.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author: Mario Kicherer <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
#define SPINAND_MFR_ALLIANCEMEMORY 0x52
#define AM_STATUS_ECC_BITMASK (3 << 4)
#define AM_STATUS_ECC_NONE_DETECTED (0 << 4)
#define AM_STATUS_ECC_CORRECTED (1 << 4)
#define AM_STATUS_ECC_ERRORED (2 << 4)
#define AM_STATUS_ECC_MAX_CORRECTED (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
static int am_get_eccsize(struct mtd_info *mtd)
{
if (mtd->oobsize == 64)
return 0x20;
else if (mtd->oobsize == 128)
return 0x38;
else if (mtd->oobsize == 256)
return 0x70;
else
return -EINVAL;
}
static int am_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
	int ecc_bytes;
	if (section)
		return -ERANGE;
	ecc_bytes = am_get_eccsize(mtd);
if (ecc_bytes < 0)
return ecc_bytes;
region->offset = mtd->oobsize - ecc_bytes;
region->length = ecc_bytes;
return 0;
}
static int am_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
int ecc_bytes;
if (section)
return -ERANGE;
ecc_bytes = am_get_eccsize(mtd);
if (ecc_bytes < 0)
return ecc_bytes;
/*
* It is unclear how many bytes are used for the bad block marker. We
* reserve the common two bytes here.
*
* The free area in this kind of flash is divided into chunks where the
* first 4 bytes of each chunk are unprotected. The number of chunks
* depends on the specific model. The models with 4096+256 bytes pages
* have 8 chunks, the others 4 chunks.
*/
region->offset = 2;
region->length = mtd->oobsize - 2 - ecc_bytes;
return 0;
}
static const struct mtd_ooblayout_ops am_ooblayout = {
.ecc = am_ooblayout_ecc,
.free = am_ooblayout_free,
};
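/*
 * Standalone sketch: am_get_eccsize() above infers the ECC byte count
 * from the OOB size alone; the layout callbacks then pin the ECC bytes
 * to the end of the OOB and expose everything between the 2-byte BBM
 * and the ECC region as free space. Worked out per supported OOB size:
 */
#include <stdio.h>
int main(void)
{
	static const int oob[] = { 64, 128, 256 };
	static const int ecc[] = { 0x20, 0x38, 0x70 };	/* am_get_eccsize() values */
	int i;
	for (i = 0; i < 3; i++)
		printf("oobsize %3d: free 2..%3d (%3d bytes), ecc %3d..%3d (%3d bytes)\n",
		       oob[i], oob[i] - ecc[i] - 1, oob[i] - 2 - ecc[i],
		       oob[i] - ecc[i], oob[i] - 1, ecc[i]);
	return 0;
}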
static int am_ecc_get_status(struct spinand_device *spinand, u8 status)
{
switch (status & AM_STATUS_ECC_BITMASK) {
case AM_STATUS_ECC_NONE_DETECTED:
return 0;
case AM_STATUS_ECC_CORRECTED:
/*
		 * Use the OOB size to determine the flash model and its
		 * maximum number of correctable errors, and return
		 * maximum - 1 by convention.
*/
if (spinand->base.mtd.oobsize == 64)
return 3;
else
return 7;
case AM_STATUS_ECC_ERRORED:
return -EBADMSG;
case AM_STATUS_ECC_MAX_CORRECTED:
/*
		 * Use the OOB size to determine the flash model and its
		 * maximum number of correctable errors.
*/
if (spinand->base.mtd.oobsize == 64)
return 4;
else
return 8;
default:
break;
}
return -EINVAL;
}
static const struct spinand_info alliancememory_spinand_table[] = {
SPINAND_INFO("AS5F34G04SND",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x2f),
NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&am_ooblayout,
am_ecc_get_status)),
};
static const struct spinand_manufacturer_ops alliancememory_spinand_manuf_ops = {
};
const struct spinand_manufacturer alliancememory_spinand_manufacturer = {
.id = SPINAND_MFR_ALLIANCEMEMORY,
.name = "AllianceMemory",
.chips = alliancememory_spinand_table,
.nchips = ARRAY_SIZE(alliancememory_spinand_table),
.ops = &alliancememory_spinand_manuf_ops,
};
| linux-master | drivers/mtd/nand/spi/alliancememory.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 Macronix
*
* Author: Boris Brezillon <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
#define SPINAND_MFR_MACRONIX 0xC2
#define MACRONIX_ECCSR_MASK 0x0F
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
static int mx35lfxge4ab_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
return -ERANGE;
}
static int mx35lfxge4ab_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
region->offset = 2;
region->length = mtd->oobsize - 2;
return 0;
}
static const struct mtd_ooblayout_ops mx35lfxge4ab_ooblayout = {
.ecc = mx35lfxge4ab_ooblayout_ecc,
.free = mx35lfxge4ab_ooblayout_free,
};
static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
{
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_DUMMY(1, 1),
SPI_MEM_OP_DATA_IN(1, eccsr, 1));
int ret = spi_mem_exec_op(spinand->spimem, &op);
if (ret)
return ret;
*eccsr &= MACRONIX_ECCSR_MASK;
return 0;
}
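/*
 * Note (illustrative, not part of the driver): the raw op built above is
 * simply opcode 0x7c, one dummy byte, then one data byte back, all on a
 * single I/O line. Only the low nibble of the returned byte carries the
 * bitflip count, hence MACRONIX_ECCSR_MASK. A standalone decode with a
 * hypothetical register value:
 */
#include <stdio.h>
int main(void)
{
	unsigned int raw = 0xa3;	/* hypothetical byte returned by the 0x7c op */
	printf("eccsr raw 0x%02x -> %u bitflips\n", raw, raw & 0x0f);
	return 0;
}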
static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 eccsr;
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
return 0;
case STATUS_ECC_UNCOR_ERROR:
return -EBADMSG;
case STATUS_ECC_HAS_BITFLIPS:
/*
* Let's try to retrieve the real maximum number of bitflips
* in order to avoid forcing the wear-leveling layer to move
* data around if it's not necessary.
*/
if (mx35lf1ge4ab_get_eccsr(spinand, spinand->scratchbuf))
return nanddev_get_ecc_conf(nand)->strength;
eccsr = *spinand->scratchbuf;
if (WARN_ON(eccsr > nanddev_get_ecc_conf(nand)->strength ||
!eccsr))
return nanddev_get_ecc_conf(nand)->strength;
return eccsr;
default:
break;
}
return -EINVAL;
}
static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO("MX35LF1GE4AB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35LF2GE4AB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x22),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX35LF2GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x26),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35LF4GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37),
NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35LF1G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX35LF2G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX35LF4G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
SPINAND_INFO("MX31LF1GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX31UF1GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x9e),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35LF2G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x20),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF4G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb5),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF4GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF2G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa0),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF2G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa4),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF2GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF2GE4AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF1G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x90),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF1G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x94),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF1GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35UF1GE4AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX31LF2GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x2e),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX3UF2GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
};
static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
};
const struct spinand_manufacturer macronix_spinand_manufacturer = {
.id = SPINAND_MFR_MACRONIX,
.name = "Macronix",
.chips = macronix_spinand_table,
.nchips = ARRAY_SIZE(macronix_spinand_table),
	.ops = &macronix_spinand_manuf_ops,
};
| linux-master | drivers/mtd/nand/spi/macronix.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 exceet electronics GmbH
* Copyright (c) 2018 Kontron Electronics GmbH
*
* Author: Frieder Schrempf <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
/* Kioxia is the new name of Toshiba Memory. */
#define SPINAND_MFR_TOSHIBA 0x98
#define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_x4_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_x4_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
/*
* Backward compatibility for 1st generation Serial NAND devices
 * which don't support the Quad Program Load operation.
*/
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD(false, 0, NULL, 0));
static int tx58cxgxsxraix_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 0)
return -ERANGE;
region->offset = mtd->oobsize / 2;
region->length = mtd->oobsize / 2;
return 0;
}
static int tx58cxgxsxraix_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 0)
return -ERANGE;
/* 2 bytes reserved for BBM */
region->offset = 2;
region->length = (mtd->oobsize / 2) - 2;
return 0;
}
static const struct mtd_ooblayout_ops tx58cxgxsxraix_ooblayout = {
.ecc = tx58cxgxsxraix_ooblayout_ecc,
.free = tx58cxgxsxraix_ooblayout_free,
};
static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
return 0;
case STATUS_ECC_UNCOR_ERROR:
return -EBADMSG;
case STATUS_ECC_HAS_BITFLIPS:
case TOSH_STATUS_ECC_HAS_BITFLIPS_T:
/*
* Let's try to retrieve the real maximum number of bitflips
* in order to avoid forcing the wear-leveling layer to move
* data around if it's not necessary.
*/
if (spi_mem_exec_op(spinand->spimem, &op))
return nanddev_get_ecc_conf(nand)->strength;
mbf = *(spinand->scratchbuf) >> 4;
if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf))
return nanddev_get_ecc_conf(nand)->strength;
return mbf;
default:
break;
}
return -EINVAL;
}
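/*
 * Standalone sketch: the Maximum Bit Flips count fetched above lives in
 * the high nibble of feature register 0x30, hence the ">> 4", and the
 * driver falls back to the configured strength when the value looks
 * bogus. The register value below is hypothetical.
 */
#include <stdio.h>
int main(void)
{
	unsigned int reg = 0x50;	/* hypothetical 0x30 feature register value */
	unsigned int mbf = reg >> 4;	/* 5 corrected bitflips */
	unsigned int strength = 8;	/* NAND_ECCREQ(8, 512) in the table below */
	if (mbf == 0 || mbf > strength)
		mbf = strength;		/* mirrors the WARN_ON() fallback */
	printf("mbf = %u\n", mbf);
	return 0;
}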
static const struct spinand_info toshiba_spinand_table[] = {
/* 3.3V 1Gb (1st generation) */
SPINAND_INFO("TC58CVG0S3HRAIG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xC2),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
/* 3.3V 2Gb (1st generation) */
SPINAND_INFO("TC58CVG1S3HRAIG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCB),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
/* 3.3V 4Gb (1st generation) */
SPINAND_INFO("TC58CVG2S0HRAIG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xCD),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
/* 1.8V 1Gb (1st generation) */
SPINAND_INFO("TC58CYG0S3HRAIG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xB2),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
/* 1.8V 2Gb (1st generation) */
SPINAND_INFO("TC58CYG1S3HRAIG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBB),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
/* 1.8V 4Gb (1st generation) */
SPINAND_INFO("TC58CYG2S0HRAIG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xBD),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
/*
	 * 2nd generation serial NAND has HOLD_D, which is equivalent to
* QE_BIT.
*/
/* 3.3V 1Gb (2nd generation) */
SPINAND_INFO("TC58CVG0S3HRAIJ",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE2),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_x4_variants,
&update_cache_x4_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
/* 3.3V 2Gb (2nd generation) */
SPINAND_INFO("TC58CVG1S3HRAIJ",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xEB),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_x4_variants,
&update_cache_x4_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
/* 3.3V 4Gb (2nd generation) */
SPINAND_INFO("TC58CVG2S0HRAIJ",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xED),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_x4_variants,
&update_cache_x4_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
/* 3.3V 8Gb (2nd generation) */
SPINAND_INFO("TH58CVG3S0HRAIJ",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE4),
NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_x4_variants,
&update_cache_x4_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
/* 1.8V 1Gb (2nd generation) */
SPINAND_INFO("TC58CYG0S3HRAIJ",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD2),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_x4_variants,
&update_cache_x4_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
/* 1.8V 2Gb (2nd generation) */
SPINAND_INFO("TC58CYG1S3HRAIJ",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDB),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_x4_variants,
&update_cache_x4_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
/* 1.8V 4Gb (2nd generation) */
SPINAND_INFO("TC58CYG2S0HRAIJ",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xDD),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_x4_variants,
&update_cache_x4_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
/* 1.8V 8Gb (2nd generation) */
SPINAND_INFO("TH58CYG3S0HRAIJ",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xD4),
NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_x4_variants,
&update_cache_x4_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
/* 1.8V 1Gb (1st generation) */
SPINAND_INFO("TC58NYG0S3HBAI4",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA1),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
/* 1.8V 4Gb (1st generation) */
SPINAND_INFO("TH58NYG2S3HBAI4",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xAC),
NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 2, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_x4_variants,
&update_cache_x4_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
/* 1.8V 8Gb (1st generation) */
SPINAND_INFO("TH58NYG3S0HBAI6",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA3),
NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_x4_variants,
&update_cache_x4_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
};
static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = {
};
const struct spinand_manufacturer toshiba_spinand_manufacturer = {
.id = SPINAND_MFR_TOSHIBA,
.name = "Toshiba",
.chips = toshiba_spinand_table,
.nchips = ARRAY_SIZE(toshiba_spinand_table),
.ops = &toshiba_spinand_manuf_ops,
};
| linux-master | drivers/mtd/nand/spi/toshiba.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017 exceet electronics GmbH
*
* Authors:
* Frieder Schrempf <[email protected]>
* Boris Brezillon <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
#define SPINAND_MFR_WINBOND 0xEF
#define WINBOND_CFG_BUF_READ BIT(3)
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
static int w25m02gv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 3)
return -ERANGE;
region->offset = (16 * section) + 8;
region->length = 8;
return 0;
}
static int w25m02gv_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 3)
return -ERANGE;
region->offset = (16 * section) + 2;
region->length = 6;
return 0;
}
static const struct mtd_ooblayout_ops w25m02gv_ooblayout = {
.ecc = w25m02gv_ooblayout_ecc,
.free = w25m02gv_ooblayout_free,
};
static int w25m02gv_select_target(struct spinand_device *spinand,
unsigned int target)
{
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0xc2, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(1,
spinand->scratchbuf,
1));
*spinand->scratchbuf = target;
return spi_mem_exec_op(spinand->spimem, &op);
}
static int w25n02kv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 3)
return -ERANGE;
region->offset = 64 + (16 * section);
region->length = 13;
return 0;
}
static int w25n02kv_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 3)
return -ERANGE;
region->offset = (16 * section) + 2;
region->length = 14;
return 0;
}
static const struct mtd_ooblayout_ops w25n02kv_ooblayout = {
.ecc = w25n02kv_ooblayout_ecc,
.free = w25n02kv_ooblayout_free,
};
static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
return 0;
case STATUS_ECC_UNCOR_ERROR:
return -EBADMSG;
case STATUS_ECC_HAS_BITFLIPS:
/*
* Let's try to retrieve the real maximum number of bitflips
* in order to avoid forcing the wear-leveling layer to move
* data around if it's not necessary.
*/
if (spi_mem_exec_op(spinand->spimem, &op))
return nanddev_get_ecc_conf(nand)->strength;
mbf = *(spinand->scratchbuf) >> 4;
if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf))
return nanddev_get_ecc_conf(nand)->strength;
return mbf;
default:
break;
}
return -EINVAL;
}
static const struct spinand_info winbond_spinand_table[] = {
SPINAND_INFO("W25M02GV",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 2),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
SPINAND_SELECT_TARGET(w25m02gv_select_target)),
SPINAND_INFO("W25N01GV",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x21),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
SPINAND_INFO("W25N02KV",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x22),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
};
static int winbond_spinand_init(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int i;
/*
* Make sure all dies are in buffer read mode and not continuous read
* mode.
*/
for (i = 0; i < nand->memorg.ntargets; i++) {
spinand_select_target(spinand, i);
spinand_upd_cfg(spinand, WINBOND_CFG_BUF_READ,
WINBOND_CFG_BUF_READ);
}
return 0;
}
static const struct spinand_manufacturer_ops winbond_spinand_manuf_ops = {
.init = winbond_spinand_init,
};
const struct spinand_manufacturer winbond_spinand_manufacturer = {
.id = SPINAND_MFR_WINBOND,
.name = "Winbond",
.chips = winbond_spinand_table,
.nchips = ARRAY_SIZE(winbond_spinand_table),
.ops = &winbond_spinand_manuf_ops,
};
| linux-master | drivers/mtd/nand/spi/winbond.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author:
* Felix Matouschek <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
#define SPINAND_MFR_XTX 0x0B
#define XT26G0XA_STATUS_ECC_MASK GENMASK(5, 2)
#define XT26G0XA_STATUS_ECC_NO_DETECTED (0 << 2)
#define XT26G0XA_STATUS_ECC_8_CORRECTED (3 << 4)
#define XT26G0XA_STATUS_ECC_UNCOR_ERROR (2 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
static int xt26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
region->offset = 48;
region->length = 16;
return 0;
}
static int xt26g0xa_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
region->offset = 1;
region->length = 47;
return 0;
}
static const struct mtd_ooblayout_ops xt26g0xa_ooblayout = {
.ecc = xt26g0xa_ooblayout_ecc,
.free = xt26g0xa_ooblayout_free,
};
static int xt26g0xa_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
status = status & XT26G0XA_STATUS_ECC_MASK;
switch (status) {
case XT26G0XA_STATUS_ECC_NO_DETECTED:
return 0;
case XT26G0XA_STATUS_ECC_8_CORRECTED:
return 8;
case XT26G0XA_STATUS_ECC_UNCOR_ERROR:
return -EBADMSG;
default:
break;
}
/* At this point values greater than (2 << 4) are invalid */
if (status > XT26G0XA_STATUS_ECC_UNCOR_ERROR)
return -EINVAL;
/* (1 << 2) through (7 << 2) are 1-7 corrected errors */
return status >> 2;
}
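/*
 * Standalone sketch mirroring xt26g0xa_ecc_get_status() above: the ECC
 * status occupies bits 5:2, values 1..7 encode that many corrected
 * bitflips directly (hence the ">> 2"), and the two patterns that reach
 * into bits 5:4 are special-cased. Negative values stand in for the
 * driver's errnos.
 */
#include <stdio.h>
static int xt26g0xa_decode(unsigned int status)
{
	status &= 0x3c;			/* XT26G0XA_STATUS_ECC_MASK, GENMASK(5, 2) */
	if (status == 0)
		return 0;		/* no errors detected */
	if (status == (3 << 4))
		return 8;		/* 8 corrected bitflips */
	if (status == (2 << 4))
		return -1;		/* uncorrectable (driver: -EBADMSG) */
	if (status > (2 << 4))
		return -2;		/* reserved (driver: -EINVAL) */
	return status >> 2;		/* 1..7 corrected bitflips */
}
int main(void)
{
	unsigned int field;
	for (field = 0; field < 16; field++)
		printf("field %2u -> %d\n", field, xt26g0xa_decode(field << 2));
	return 0;
}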
static const struct spinand_info xtx_spinand_table[] = {
SPINAND_INFO("XT26G01A",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xE1),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&xt26g0xa_ooblayout,
xt26g0xa_ecc_get_status)),
SPINAND_INFO("XT26G02A",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xE2),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&xt26g0xa_ooblayout,
xt26g0xa_ecc_get_status)),
SPINAND_INFO("XT26G04A",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xE3),
NAND_MEMORG(1, 2048, 64, 128, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&xt26g0xa_ooblayout,
xt26g0xa_ecc_get_status)),
};
static const struct spinand_manufacturer_ops xtx_spinand_manuf_ops = {
};
const struct spinand_manufacturer xtx_spinand_manufacturer = {
.id = SPINAND_MFR_XTX,
.name = "XTX",
.chips = xtx_spinand_table,
.nchips = ARRAY_SIZE(xtx_spinand_table),
.ops = &xtx_spinand_manuf_ops,
};
| linux-master | drivers/mtd/nand/spi/xtx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016-2017 Micron Technology, Inc.
*
* Authors:
* Peter Pan <[email protected]>
* Boris Brezillon <[email protected]>
*/
#define pr_fmt(fmt) "spi-nand: " fmt
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
spinand->scratchbuf);
int ret;
ret = spi_mem_exec_op(spinand->spimem, &op);
if (ret)
return ret;
*val = *spinand->scratchbuf;
return 0;
}
static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
spinand->scratchbuf);
*spinand->scratchbuf = val;
return spi_mem_exec_op(spinand->spimem, &op);
}
static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
return spinand_read_reg_op(spinand, REG_STATUS, status);
}
static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
struct nand_device *nand = spinand_to_nand(spinand);
if (WARN_ON(spinand->cur_target < 0 ||
spinand->cur_target >= nand->memorg.ntargets))
return -EINVAL;
*cfg = spinand->cfg_cache[spinand->cur_target];
return 0;
}
static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
struct nand_device *nand = spinand_to_nand(spinand);
int ret;
if (WARN_ON(spinand->cur_target < 0 ||
spinand->cur_target >= nand->memorg.ntargets))
return -EINVAL;
if (spinand->cfg_cache[spinand->cur_target] == cfg)
return 0;
ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
if (ret)
return ret;
spinand->cfg_cache[spinand->cur_target] = cfg;
return 0;
}
/**
* spinand_upd_cfg() - Update the configuration register
* @spinand: the spinand device
* @mask: the mask encoding the bits to update in the config reg
* @val: the new value to apply
*
* Update the configuration register.
*
* Return: 0 on success, a negative error code otherwise.
*/
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
int ret;
u8 cfg;
ret = spinand_get_cfg(spinand, &cfg);
if (ret)
return ret;
cfg &= ~mask;
cfg |= val;
return spinand_set_cfg(spinand, cfg);
}
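/*
 * Usage sketch (illustrative): callers pass the bits they own in @mask
 * and the desired state of those bits in @val, so the helper can set,
 * clear or rewrite a field without disturbing its neighbours, e.g.:
 *
 *	spinand_upd_cfg(spinand, CFG_ECC_ENABLE, CFG_ECC_ENABLE);  // set
 *	spinand_upd_cfg(spinand, CFG_ECC_ENABLE, 0);               // clear
 *
 * Thanks to the cfg_cache comparison in spinand_set_cfg(), redundant
 * updates never reach the bus.
 */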
/**
* spinand_select_target() - Select a specific NAND target/die
* @spinand: the spinand device
* @target: the target/die to select
*
 * Select a new target/die. If the chip only has one die, this function is a no-op.
*
* Return: 0 on success, a negative error code otherwise.
*/
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
struct nand_device *nand = spinand_to_nand(spinand);
int ret;
if (WARN_ON(target >= nand->memorg.ntargets))
return -EINVAL;
if (spinand->cur_target == target)
return 0;
if (nand->memorg.ntargets == 1) {
spinand->cur_target = target;
return 0;
}
ret = spinand->select_target(spinand, target);
if (ret)
return ret;
spinand->cur_target = target;
return 0;
}
static int spinand_read_cfg(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int target;
int ret;
for (target = 0; target < nand->memorg.ntargets; target++) {
ret = spinand_select_target(spinand, target);
if (ret)
return ret;
/*
* We use spinand_read_reg_op() instead of spinand_get_cfg()
* here to bypass the config cache.
*/
ret = spinand_read_reg_op(spinand, REG_CFG,
&spinand->cfg_cache[target]);
if (ret)
return ret;
}
return 0;
}
static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
struct device *dev = &spinand->spimem->spi->dev;
spinand->cfg_cache = devm_kcalloc(dev,
nand->memorg.ntargets,
sizeof(*spinand->cfg_cache),
GFP_KERNEL);
if (!spinand->cfg_cache)
return -ENOMEM;
return 0;
}
static int spinand_init_quad_enable(struct spinand_device *spinand)
{
bool enable = false;
if (!(spinand->flags & SPINAND_HAS_QE_BIT))
return 0;
if (spinand->op_templates.read_cache->data.buswidth == 4 ||
spinand->op_templates.write_cache->data.buswidth == 4 ||
spinand->op_templates.update_cache->data.buswidth == 4)
enable = true;
return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
enable ? CFG_QUAD_ENABLE : 0);
}
static int spinand_ecc_enable(struct spinand_device *spinand,
bool enable)
{
return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
enable ? CFG_ECC_ENABLE : 0);
}
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
if (spinand->eccinfo.get_status)
return spinand->eccinfo.get_status(spinand, status);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
return 0;
case STATUS_ECC_HAS_BITFLIPS:
/*
* We have no way to know exactly how many bitflips have been
* fixed, so let's return the maximum possible value so that
* wear-leveling layers move the data immediately.
*/
return nanddev_get_ecc_conf(nand)->strength;
case STATUS_ECC_UNCOR_ERROR:
return -EBADMSG;
default:
break;
}
return -EINVAL;
}
static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
return -ERANGE;
}
static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
/* Reserve 2 bytes for the BBM. */
region->offset = 2;
region->length = 62;
return 0;
}
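/*
 * Note: the hard-coded length of 62 assumes the common 64-byte OOB area:
 * bytes 0-1 hold the bad block marker and bytes 2-63 are reported as free
 * when the chip does not provide its own OOB layout.
 */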
static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
.ecc = spinand_noecc_ooblayout_ecc,
.free = spinand_noecc_ooblayout_free,
};
static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
{
struct spinand_device *spinand = nand_to_spinand(nand);
struct mtd_info *mtd = nanddev_to_mtd(nand);
struct spinand_ondie_ecc_conf *engine_conf;
nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;
engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
if (!engine_conf)
return -ENOMEM;
nand->ecc.ctx.priv = engine_conf;
if (spinand->eccinfo.ooblayout)
mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
else
mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
return 0;
}
static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
{
kfree(nand->ecc.ctx.priv);
}
static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
struct nand_page_io_req *req)
{
struct spinand_device *spinand = nand_to_spinand(nand);
bool enable = (req->mode != MTD_OPS_RAW);
memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
/* Only enable or disable the engine */
return spinand_ecc_enable(spinand, enable);
}
static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
struct nand_page_io_req *req)
{
struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
struct spinand_device *spinand = nand_to_spinand(nand);
struct mtd_info *mtd = spinand_to_mtd(spinand);
int ret;
if (req->mode == MTD_OPS_RAW)
return 0;
/* Nothing to do when finishing a page write */
if (req->type == NAND_PAGE_WRITE)
return 0;
/* Finish a page read: check the status, report errors/bitflips */
ret = spinand_check_ecc_status(spinand, engine_conf->status);
if (ret == -EBADMSG)
mtd->ecc_stats.failed++;
else if (ret > 0)
mtd->ecc_stats.corrected += ret;
return ret;
}
static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
.init_ctx = spinand_ondie_ecc_init_ctx,
.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
.finish_io_req = spinand_ondie_ecc_finish_io_req,
};
static struct nand_ecc_engine spinand_ondie_ecc_engine = {
.ops = &spinand_ondie_ecc_engine_ops,
};
static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
{
struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
engine_conf)
engine_conf->status = status;
}
static int spinand_write_enable_op(struct spinand_device *spinand)
{
struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
return spi_mem_exec_op(spinand->spimem, &op);
}
static int spinand_load_page_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
static int spinand_read_from_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
struct mtd_info *mtd = spinand_to_mtd(spinand);
struct spi_mem_dirmap_desc *rdesc;
unsigned int nbytes = 0;
void *buf = NULL;
u16 column = 0;
ssize_t ret;
if (req->datalen) {
buf = spinand->databuf;
nbytes = nanddev_page_size(nand);
column = 0;
}
if (req->ooblen) {
nbytes += nanddev_per_page_oobsize(nand);
if (!buf) {
buf = spinand->oobbuf;
column = nanddev_page_size(nand);
}
}
if (req->mode == MTD_OPS_RAW)
rdesc = spinand->dirmaps[req->pos.plane].rdesc;
else
rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;
while (nbytes) {
ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
if (ret < 0)
return ret;
if (!ret || ret > nbytes)
return -EIO;
nbytes -= ret;
column += ret;
buf += ret;
}
if (req->datalen)
memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
req->datalen);
if (req->ooblen) {
if (req->mode == MTD_OPS_AUTO_OOB)
mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
spinand->oobbuf,
req->ooboffs,
req->ooblen);
else
memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
req->ooblen);
}
return 0;
}
static int spinand_write_to_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
struct mtd_info *mtd = spinand_to_mtd(spinand);
struct spi_mem_dirmap_desc *wdesc;
unsigned int nbytes, column = 0;
void *buf = spinand->databuf;
ssize_t ret;
/*
* Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
* the cache content to 0xFF (depends on vendor implementation), so we
* must fill the page cache entirely even if we only want to program
* the data portion of the page, otherwise we might corrupt the BBM or
* user data previously programmed in OOB area.
*
* Only reset the data buffer manually; the OOB buffer is prepared by
* the ECC engine's ->prepare_io_req() callback.
*/
nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
memset(spinand->databuf, 0xff, nanddev_page_size(nand));
if (req->datalen)
memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
req->datalen);
if (req->ooblen) {
if (req->mode == MTD_OPS_AUTO_OOB)
mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
spinand->oobbuf,
req->ooboffs,
req->ooblen);
else
memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
req->ooblen);
}
if (req->mode == MTD_OPS_RAW)
wdesc = spinand->dirmaps[req->pos.plane].wdesc;
else
wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;
while (nbytes) {
ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
if (ret < 0)
return ret;
if (!ret || ret > nbytes)
return -EIO;
nbytes -= ret;
column += ret;
buf += ret;
}
return 0;
}
static int spinand_program_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
static int spinand_erase_op(struct spinand_device *spinand,
const struct nand_pos *pos)
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, pos);
struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
static int spinand_wait(struct spinand_device *spinand,
unsigned long initial_delay_us,
unsigned long poll_delay_us,
u8 *s)
{
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
spinand->scratchbuf);
u8 status;
int ret;
ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
initial_delay_us,
poll_delay_us,
SPINAND_WAITRDY_TIMEOUT_MS);
if (ret)
return ret;
status = *spinand->scratchbuf;
if (!(status & STATUS_BUSY))
goto out;
/*
* Extra read, just in case the STATUS_BUSY bit has changed
* since our last check
*/
ret = spinand_read_status(spinand, &status);
if (ret)
return ret;
out:
if (s)
*s = status;
return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}
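/*
 * Illustrative call (as used by the erase path below): the initial and
 * polling delays are per-operation constants, and the final status is
 * only reported back when a non-NULL pointer is passed, e.g.
 *
 *	ret = spinand_wait(spinand, SPINAND_ERASE_INITIAL_DELAY_US,
 *			   SPINAND_ERASE_POLL_DELAY_US, &status);
 */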
static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
u8 ndummy, u8 *buf)
{
struct spi_mem_op op = SPINAND_READID_OP(
naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
int ret;
ret = spi_mem_exec_op(spinand->spimem, &op);
if (!ret)
memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
return ret;
}
static int spinand_reset_op(struct spinand_device *spinand)
{
struct spi_mem_op op = SPINAND_RESET_OP;
int ret;
ret = spi_mem_exec_op(spinand->spimem, &op);
if (ret)
return ret;
return spinand_wait(spinand,
SPINAND_RESET_INITIAL_DELAY_US,
SPINAND_RESET_POLL_DELAY_US,
NULL);
}
static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
static int spinand_read_page(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
int ret;
ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
if (ret)
return ret;
ret = spinand_load_page_op(spinand, req);
if (ret)
return ret;
ret = spinand_wait(spinand,
SPINAND_READ_INITIAL_DELAY_US,
SPINAND_READ_POLL_DELAY_US,
&status);
if (ret < 0)
return ret;
spinand_ondie_ecc_save_status(nand, status);
ret = spinand_read_from_cache_op(spinand, req);
if (ret)
return ret;
return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
static int spinand_write_page(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
int ret;
ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
if (ret)
return ret;
ret = spinand_write_enable_op(spinand);
if (ret)
return ret;
ret = spinand_write_to_cache_op(spinand, req);
if (ret)
return ret;
ret = spinand_program_op(spinand, req);
if (ret)
return ret;
ret = spinand_wait(spinand,
SPINAND_WRITE_INITIAL_DELAY_US,
SPINAND_WRITE_POLL_DELAY_US,
&status);
if (!ret && (status & STATUS_PROG_FAILED))
return -EIO;
return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
struct mtd_ecc_stats old_stats;
unsigned int max_bitflips = 0;
struct nand_io_iter iter;
bool disable_ecc = false;
bool ecc_failed = false;
int ret = 0;
if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
disable_ecc = true;
mutex_lock(&spinand->lock);
old_stats = mtd->ecc_stats;
nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
if (disable_ecc)
iter.req.mode = MTD_OPS_RAW;
ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
ret = spinand_read_page(spinand, &iter.req);
if (ret < 0 && ret != -EBADMSG)
break;
if (ret == -EBADMSG)
ecc_failed = true;
else
max_bitflips = max_t(unsigned int, max_bitflips, ret);
ret = 0;
ops->retlen += iter.req.datalen;
ops->oobretlen += iter.req.ooblen;
}
if (ops->stats) {
ops->stats->uncorrectable_errors +=
mtd->ecc_stats.failed - old_stats.failed;
ops->stats->corrected_bitflips +=
mtd->ecc_stats.corrected - old_stats.corrected;
}
mutex_unlock(&spinand->lock);
if (ecc_failed && !ret)
ret = -EBADMSG;
return ret ? ret : max_bitflips;
}
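/*
 * Note on the return convention honoured above: a clean read reports the
 * largest number of bitflips corrected in any single page, while an
 * uncorrectable page turns the whole call into -EBADMSG once all pages
 * have been attempted.
 */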
static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
struct nand_io_iter iter;
bool disable_ecc = false;
int ret = 0;
if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
disable_ecc = true;
mutex_lock(&spinand->lock);
nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
if (disable_ecc)
iter.req.mode = MTD_OPS_RAW;
ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
ret = spinand_write_page(spinand, &iter.req);
if (ret)
break;
ops->retlen += iter.req.datalen;
ops->oobretlen += iter.req.ooblen;
}
mutex_unlock(&spinand->lock);
return ret;
}
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
struct spinand_device *spinand = nand_to_spinand(nand);
u8 marker[2] = { };
struct nand_page_io_req req = {
.pos = *pos,
.ooblen = sizeof(marker),
.ooboffs = 0,
.oobbuf.in = marker,
.mode = MTD_OPS_RAW,
};
spinand_select_target(spinand, pos->target);
spinand_read_page(spinand, &req);
if (marker[0] != 0xff || marker[1] != 0xff)
return true;
return false;
}
static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
struct spinand_device *spinand = nand_to_spinand(nand);
struct nand_pos pos;
int ret;
nanddev_offs_to_pos(nand, offs, &pos);
mutex_lock(&spinand->lock);
ret = nanddev_isbad(nand, &pos);
mutex_unlock(&spinand->lock);
return ret;
}
static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
struct spinand_device *spinand = nand_to_spinand(nand);
u8 marker[2] = { };
struct nand_page_io_req req = {
.pos = *pos,
.ooboffs = 0,
.ooblen = sizeof(marker),
.oobbuf.out = marker,
.mode = MTD_OPS_RAW,
};
int ret;
ret = spinand_select_target(spinand, pos->target);
if (ret)
return ret;
ret = spinand_write_enable_op(spinand);
if (ret)
return ret;
return spinand_write_page(spinand, &req);
}
static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
struct spinand_device *spinand = nand_to_spinand(nand);
struct nand_pos pos;
int ret;
nanddev_offs_to_pos(nand, offs, &pos);
mutex_lock(&spinand->lock);
ret = nanddev_markbad(nand, &pos);
mutex_unlock(&spinand->lock);
return ret;
}
static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
struct spinand_device *spinand = nand_to_spinand(nand);
u8 status;
int ret;
ret = spinand_select_target(spinand, pos->target);
if (ret)
return ret;
ret = spinand_write_enable_op(spinand);
if (ret)
return ret;
ret = spinand_erase_op(spinand, pos);
if (ret)
return ret;
ret = spinand_wait(spinand,
SPINAND_ERASE_INITIAL_DELAY_US,
SPINAND_ERASE_POLL_DELAY_US,
&status);
if (!ret && (status & STATUS_ERASE_FAILED))
ret = -EIO;
return ret;
}
static int spinand_mtd_erase(struct mtd_info *mtd,
struct erase_info *einfo)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
int ret;
mutex_lock(&spinand->lock);
ret = nanddev_mtd_erase(mtd, einfo);
mutex_unlock(&spinand->lock);
return ret;
}
static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
struct nand_pos pos;
int ret;
nanddev_offs_to_pos(nand, offs, &pos);
mutex_lock(&spinand->lock);
ret = nanddev_isreserved(nand, &pos);
mutex_unlock(&spinand->lock);
return ret;
}
static int spinand_create_dirmap(struct spinand_device *spinand,
unsigned int plane)
{
struct nand_device *nand = spinand_to_nand(spinand);
struct spi_mem_dirmap_info info = {
.length = nanddev_page_size(nand) +
nanddev_per_page_oobsize(nand),
};
struct spi_mem_dirmap_desc *desc;
/* The plane number is passed in MSB just above the column address */
info.offset = plane << fls(nand->memorg.pagesize);
info.op_tmpl = *spinand->op_templates.update_cache;
desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
spinand->spimem, &info);
if (IS_ERR(desc))
return PTR_ERR(desc);
spinand->dirmaps[plane].wdesc = desc;
info.op_tmpl = *spinand->op_templates.read_cache;
desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
spinand->spimem, &info);
if (IS_ERR(desc))
return PTR_ERR(desc);
spinand->dirmaps[plane].rdesc = desc;
if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) {
spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;
return 0;
}
info.op_tmpl = *spinand->op_templates.update_cache;
info.op_tmpl.data.ecc = true;
desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
spinand->spimem, &info);
if (IS_ERR(desc))
return PTR_ERR(desc);
spinand->dirmaps[plane].wdesc_ecc = desc;
info.op_tmpl = *spinand->op_templates.read_cache;
info.op_tmpl.data.ecc = true;
desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
spinand->spimem, &info);
if (IS_ERR(desc))
return PTR_ERR(desc);
spinand->dirmaps[plane].rdesc_ecc = desc;
return 0;
}
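/*
 * Note: the ECC-annotated descriptors above are only created when the
 * external ECC engine is pipelined; in all other cases the plain read and
 * write descriptors are simply reused for ECC-enabled accesses.
 */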
static int spinand_create_dirmaps(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
int i, ret;
spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
sizeof(*spinand->dirmaps) *
nand->memorg.planes_per_lun,
GFP_KERNEL);
if (!spinand->dirmaps)
return -ENOMEM;
for (i = 0; i < nand->memorg.planes_per_lun; i++) {
ret = spinand_create_dirmap(spinand, i);
if (ret)
return ret;
}
return 0;
}
static const struct nand_ops spinand_ops = {
.erase = spinand_erase,
.markbad = spinand_markbad,
.isbad = spinand_isbad,
};
static const struct spinand_manufacturer *spinand_manufacturers[] = {
&alliancememory_spinand_manufacturer,
&ato_spinand_manufacturer,
&esmt_c8_spinand_manufacturer,
&gigadevice_spinand_manufacturer,
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
&paragon_spinand_manufacturer,
&toshiba_spinand_manufacturer,
&winbond_spinand_manufacturer,
&xtx_spinand_manufacturer,
};
static int spinand_manufacturer_match(struct spinand_device *spinand,
enum spinand_readid_method rdid_method)
{
u8 *id = spinand->id.data;
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
const struct spinand_manufacturer *manufacturer =
spinand_manufacturers[i];
if (id[0] != manufacturer->id)
continue;
ret = spinand_match_and_init(spinand,
manufacturer->chips,
manufacturer->nchips,
rdid_method);
if (ret < 0)
continue;
spinand->manufacturer = manufacturer;
return 0;
}
return -ENOTSUPP;
}
static int spinand_id_detect(struct spinand_device *spinand)
{
u8 *id = spinand->id.data;
int ret;
ret = spinand_read_id_op(spinand, 0, 0, id);
if (ret)
return ret;
ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
if (!ret)
return 0;
ret = spinand_read_id_op(spinand, 1, 0, id);
if (ret)
return ret;
ret = spinand_manufacturer_match(spinand,
SPINAND_READID_METHOD_OPCODE_ADDR);
if (!ret)
return 0;
ret = spinand_read_id_op(spinand, 0, 1, id);
if (ret)
return ret;
ret = spinand_manufacturer_match(spinand,
SPINAND_READID_METHOD_OPCODE_DUMMY);
return ret;
}
static int spinand_manufacturer_init(struct spinand_device *spinand)
{
if (spinand->manufacturer->ops->init)
return spinand->manufacturer->ops->init(spinand);
return 0;
}
static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
/* Release manufacturer private data */
if (spinand->manufacturer->ops->cleanup)
return spinand->manufacturer->ops->cleanup(spinand);
}
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
const struct spinand_op_variants *variants)
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int i;
for (i = 0; i < variants->nops; i++) {
struct spi_mem_op op = variants->ops[i];
unsigned int nbytes;
int ret;
nbytes = nanddev_per_page_oobsize(nand) +
nanddev_page_size(nand);
while (nbytes) {
op.data.nbytes = nbytes;
ret = spi_mem_adjust_op_size(spinand->spimem, &op);
if (ret)
break;
if (!spi_mem_supports_op(spinand->spimem, &op))
break;
nbytes -= op.data.nbytes;
}
if (!nbytes)
return &variants->ops[i];
}
return NULL;
}
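/*
 * The selection above keeps the first variant (they are conventionally
 * listed fastest first) with which the controller can transfer a full
 * page + OOB, possibly split into several chunks by
 * spi_mem_adjust_op_size().
 */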
/**
* spinand_match_and_init() - Try to find a match between a device ID and an
* entry in a spinand_info table
* @spinand: SPI NAND object
* @table: SPI NAND device description table
* @table_size: size of the device description table
* @rdid_method: read id method to match
*
* Match between a device ID retrieved through the READ_ID command and an
* entry in the SPI NAND description table. If a match is found, the spinand
* object will be initialized with information provided by the matching
* spinand_info entry.
*
* Return: 0 on success, a negative error code otherwise.
*/
int spinand_match_and_init(struct spinand_device *spinand,
const struct spinand_info *table,
unsigned int table_size,
enum spinand_readid_method rdid_method)
{
u8 *id = spinand->id.data;
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int i;
for (i = 0; i < table_size; i++) {
const struct spinand_info *info = &table[i];
const struct spi_mem_op *op;
if (rdid_method != info->devid.method)
continue;
if (memcmp(id + 1, info->devid.id, info->devid.len))
continue;
nand->memorg = table[i].memorg;
nanddev_set_ecc_requirements(nand, &table[i].eccreq);
spinand->eccinfo = table[i].eccinfo;
spinand->flags = table[i].flags;
spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
op = spinand_select_op_variant(spinand,
info->op_variants.read_cache);
if (!op)
return -ENOTSUPP;
spinand->op_templates.read_cache = op;
op = spinand_select_op_variant(spinand,
info->op_variants.write_cache);
if (!op)
return -ENOTSUPP;
spinand->op_templates.write_cache = op;
op = spinand_select_op_variant(spinand,
info->op_variants.update_cache);
spinand->op_templates.update_cache = op;
return 0;
}
return -ENOTSUPP;
}
static int spinand_detect(struct spinand_device *spinand)
{
struct device *dev = &spinand->spimem->spi->dev;
struct nand_device *nand = spinand_to_nand(spinand);
int ret;
ret = spinand_reset_op(spinand);
if (ret)
return ret;
ret = spinand_id_detect(spinand);
if (ret) {
dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
spinand->id.data);
return ret;
}
if (nand->memorg.ntargets > 1 && !spinand->select_target) {
dev_err(dev,
"SPI NANDs with more than one die must implement ->select_target()\n");
return -EINVAL;
}
dev_info(dev, "%s SPI NAND was found.\n", spinand->manufacturer->name);
dev_info(dev,
"%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
return 0;
}
static int spinand_init_flash(struct spinand_device *spinand)
{
struct device *dev = &spinand->spimem->spi->dev;
struct nand_device *nand = spinand_to_nand(spinand);
int ret, i;
ret = spinand_read_cfg(spinand);
if (ret)
return ret;
ret = spinand_init_quad_enable(spinand);
if (ret)
return ret;
ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
if (ret)
return ret;
ret = spinand_manufacturer_init(spinand);
if (ret) {
dev_err(dev,
"Failed to initialize the SPI NAND chip (err = %d)\n",
ret);
return ret;
}
/* After power up, all blocks are locked, so unlock them here. */
for (i = 0; i < nand->memorg.ntargets; i++) {
ret = spinand_select_target(spinand, i);
if (ret)
break;
ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
if (ret)
break;
}
if (ret)
spinand_manufacturer_cleanup(spinand);
return ret;
}
static void spinand_mtd_resume(struct mtd_info *mtd)
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
int ret;
ret = spinand_reset_op(spinand);
if (ret)
return;
ret = spinand_init_flash(spinand);
if (ret)
return;
spinand_ecc_enable(spinand, false);
}
static int spinand_init(struct spinand_device *spinand)
{
struct device *dev = &spinand->spimem->spi->dev;
struct mtd_info *mtd = spinand_to_mtd(spinand);
struct nand_device *nand = mtd_to_nanddev(mtd);
int ret;
/*
* We need a scratch buffer because the spi_mem interface requires that
* the buffer passed in spi_mem_op->data.buf be DMA-able.
*/
spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
if (!spinand->scratchbuf)
return -ENOMEM;
ret = spinand_detect(spinand);
if (ret)
goto err_free_bufs;
/*
* Use kzalloc() instead of devm_kzalloc() here, because some drivers
* may use this buffer for DMA access.
* Memory allocated by devm_ does not guarantee DMA-safe alignment.
*/
spinand->databuf = kzalloc(nanddev_page_size(nand) +
nanddev_per_page_oobsize(nand),
GFP_KERNEL);
if (!spinand->databuf) {
ret = -ENOMEM;
goto err_free_bufs;
}
spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
ret = spinand_init_cfg_cache(spinand);
if (ret)
goto err_free_bufs;
ret = spinand_init_flash(spinand);
if (ret)
goto err_free_bufs;
ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
if (ret)
goto err_manuf_cleanup;
/* SPI-NAND default ECC engine is on-die */
nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;
spinand_ecc_enable(spinand, false);
ret = nanddev_ecc_engine_init(nand);
if (ret)
goto err_cleanup_nanddev;
mtd->_read_oob = spinand_mtd_read;
mtd->_write_oob = spinand_mtd_write;
mtd->_block_isbad = spinand_mtd_block_isbad;
mtd->_block_markbad = spinand_mtd_block_markbad;
mtd->_block_isreserved = spinand_mtd_block_isreserved;
mtd->_erase = spinand_mtd_erase;
mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
mtd->_resume = spinand_mtd_resume;
if (nand->ecc.engine) {
ret = mtd_ooblayout_count_freebytes(mtd);
if (ret < 0)
goto err_cleanup_ecc_engine;
}
mtd->oobavail = ret;
/* Propagate ECC information to mtd_info */
mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
ret = spinand_create_dirmaps(spinand);
if (ret) {
dev_err(dev,
"Failed to create direct mappings for read/write operations (err = %d)\n",
ret);
goto err_cleanup_ecc_engine;
}
return 0;
err_cleanup_ecc_engine:
nanddev_ecc_engine_cleanup(nand);
err_cleanup_nanddev:
nanddev_cleanup(nand);
err_manuf_cleanup:
spinand_manufacturer_cleanup(spinand);
err_free_bufs:
kfree(spinand->databuf);
kfree(spinand->scratchbuf);
return ret;
}
static void spinand_cleanup(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
nanddev_cleanup(nand);
spinand_manufacturer_cleanup(spinand);
kfree(spinand->databuf);
kfree(spinand->scratchbuf);
}
static int spinand_probe(struct spi_mem *mem)
{
struct spinand_device *spinand;
struct mtd_info *mtd;
int ret;
spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
GFP_KERNEL);
if (!spinand)
return -ENOMEM;
spinand->spimem = mem;
spi_mem_set_drvdata(mem, spinand);
spinand_set_of_node(spinand, mem->spi->dev.of_node);
mutex_init(&spinand->lock);
mtd = spinand_to_mtd(spinand);
mtd->dev.parent = &mem->spi->dev;
ret = spinand_init(spinand);
if (ret)
return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret)
goto err_spinand_cleanup;
return 0;
err_spinand_cleanup:
spinand_cleanup(spinand);
return ret;
}
static int spinand_remove(struct spi_mem *mem)
{
struct spinand_device *spinand;
struct mtd_info *mtd;
int ret;
spinand = spi_mem_get_drvdata(mem);
mtd = spinand_to_mtd(spinand);
ret = mtd_device_unregister(mtd);
if (ret)
return ret;
spinand_cleanup(spinand);
return 0;
}
static const struct spi_device_id spinand_ids[] = {
{ .name = "spi-nand" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);
#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
{ .compatible = "spi-nand" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);
#endif
static struct spi_mem_driver spinand_drv = {
.spidrv = {
.id_table = spinand_ids,
.driver = {
.name = "spi-nand",
.of_match_table = of_match_ptr(spinand_of_ids),
},
},
.probe = spinand_probe,
.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);
MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mtd/nand/spi/core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author:
* Chuanhong Guo <[email protected]> - the main driver logic
* Martin Kurbanov <[email protected]> - OOB layout
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
/* ESMT uses the GigaDevice 0xc8 JEDEC ID on some SPI NANDs */
#define SPINAND_MFR_ESMT_C8 0xc8
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
/*
* OOB spare area map (64 bytes)
*
* Bad Block Markers
* filled by HW and kernel Reserved
* | +-----------------------+-----------------------+
* | | | |
* | | OOB free data Area |non ECC protected |
* | +-------------|-----+-----------------|-----+-----------------|-----+
* | | | | | | | |
* +-|---|----------+--|-----|--------------+--|-----|--------------+--|-----|--------------+
* | | | section0 | | | section1 | | | section2 | | | section3 |
* +-v-+-v-+---+----+--v--+--v--+-----+-----+--v--+--v--+-----+-----+--v--+--v--+-----+-----+
* | | | | | | | | | | | | | | | | |
* |0:1|2:3|4:7|8:15|16:17|18:19|20:23|24:31|32:33|34:35|36:39|40:47|48:49|50:51|52:55|56:63|
* | | | | | | | | | | | | | | | | |
* +---+---+-^-+--^-+-----+-----+--^--+--^--+-----+-----+--^--+--^--+-----+-----+--^--+--^--+
* | | | | | | | |
* | +----------------|-----+-----------------|-----+-----------------|-----+
* | ECC Area|(Main + Spare) - filled|by ESMT NAND HW |
* | | | |
* +---------------------+-----------------------+-----------------------+
* OOB ECC protected Area - not used due to
* partial programming from some filesystems
* (like JFFS2 with cleanmarkers)
*/
#define ESMT_OOB_SECTION_COUNT 4
#define ESMT_OOB_SECTION_SIZE(nand) \
(nanddev_per_page_oobsize(nand) / ESMT_OOB_SECTION_COUNT)
#define ESMT_OOB_FREE_SIZE(nand) \
(ESMT_OOB_SECTION_SIZE(nand) / 2)
#define ESMT_OOB_ECC_SIZE(nand) \
(ESMT_OOB_SECTION_SIZE(nand) - ESMT_OOB_FREE_SIZE(nand))
#define ESMT_OOB_BBM_SIZE 2
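/*
 * Worked example for the usual 64-byte OOB: each of the 4 sections is
 * 16 bytes, split into 8 "free" bytes followed by 8 ECC bytes, so the
 * ECC regions declared below are bytes 8-15, 24-31, 40-47 and 56-63.
 */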
static int f50l1g41lb_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
if (section >= ESMT_OOB_SECTION_COUNT)
return -ERANGE;
region->offset = section * ESMT_OOB_SECTION_SIZE(nand) +
ESMT_OOB_FREE_SIZE(nand);
region->length = ESMT_OOB_ECC_SIZE(nand);
return 0;
}
static int f50l1g41lb_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
if (section >= ESMT_OOB_SECTION_COUNT)
return -ERANGE;
/*
* Reserve space for bad blocks markers (section0) and
* reserved bytes (sections 1-3)
*/
region->offset = section * ESMT_OOB_SECTION_SIZE(nand) + 2;
/* Use only 2 of the non-ECC-protected bytes in each OOB section */
region->length = 2;
return 0;
}
static const struct mtd_ooblayout_ops f50l1g41lb_ooblayout = {
.ecc = f50l1g41lb_ooblayout_ecc,
.free = f50l1g41lb_ooblayout_free,
};
static const struct spinand_info esmt_c8_spinand_table[] = {
SPINAND_INFO("F50L1G41LB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
SPINAND_INFO("F50D1G41LB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
SPINAND_INFO("F50D2G41KA",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
};
static const struct spinand_manufacturer_ops esmt_spinand_manuf_ops = {
};
const struct spinand_manufacturer esmt_c8_spinand_manufacturer = {
.id = SPINAND_MFR_ESMT_C8,
.name = "ESMT",
.chips = esmt_c8_spinand_table,
.nchips = ARRAY_SIZE(esmt_c8_spinand_table),
.ops = &esmt_spinand_manuf_ops,
};
| linux-master | drivers/mtd/nand/spi/esmt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Aidan MacDonald
*
* Author: Aidan MacDonald <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
#define SPINAND_MFR_ATO 0x9b
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
static int ato25d1ga_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 3)
return -ERANGE;
region->offset = (16 * section) + 8;
region->length = 8;
return 0;
}
static int ato25d1ga_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 3)
return -ERANGE;
if (section) {
region->offset = (16 * section);
region->length = 8;
} else {
/* first byte of section 0 is reserved for the BBM */
region->offset = 1;
region->length = 7;
}
return 0;
}
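/*
 * Resulting layout for the 64-byte OOB (illustrative): ECC occupies bytes
 * 8-15, 24-31, 40-47 and 56-63; the free bytes are 1-7 (byte 0 is the
 * BBM), 16-23, 32-39 and 48-55.
 */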
static const struct mtd_ooblayout_ops ato25d1ga_ooblayout = {
.ecc = ato25d1ga_ooblayout_ecc,
.free = ato25d1ga_ooblayout_free,
};
static const struct spinand_info ato_spinand_table[] = {
SPINAND_INFO("ATO25D1GA",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x12),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&ato25d1ga_ooblayout, NULL)),
};
static const struct spinand_manufacturer_ops ato_spinand_manuf_ops = {
};
const struct spinand_manufacturer ato_spinand_manufacturer = {
.id = SPINAND_MFR_ATO,
.name = "ATO",
.chips = ato_spinand_table,
.nchips = ARRAY_SIZE(ato_spinand_table),
.ops = &ato_spinand_manuf_ops,
};
| linux-master | drivers/mtd/nand/spi/ato.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Jeff Kletsky
*
* Author: Jeff Kletsky <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
#define SPINAND_MFR_PARAGON 0xa1
#define PN26G0XA_STATUS_ECC_BITMASK (3 << 4)
#define PN26G0XA_STATUS_ECC_NONE_DETECTED (0 << 4)
#define PN26G0XA_STATUS_ECC_1_7_CORRECTED (1 << 4)
#define PN26G0XA_STATUS_ECC_ERRORED (2 << 4)
#define PN26G0XA_STATUS_ECC_8_CORRECTED (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
static int pn26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 3)
return -ERANGE;
region->offset = 6 + (15 * section); /* 4 BBM + 2 user bytes */
region->length = 13;
return 0;
}
static int pn26g0xa_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 4)
return -ERANGE;
if (section == 4) {
region->offset = 64;
region->length = 64;
} else {
region->offset = 4 + (15 * section);
region->length = 2;
}
return 0;
}
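/*
 * Resulting layout for the 128-byte OOB (illustrative): per-section ECC
 * at bytes 6-18, 21-33, 36-48 and 51-63, user bytes at 4-5, 19-20, 34-35
 * and 49-50, and the whole upper half (bytes 64-127) reported free by
 * section 4.
 */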
static int pn26g0xa_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
switch (status & PN26G0XA_STATUS_ECC_BITMASK) {
case PN26G0XA_STATUS_ECC_NONE_DETECTED:
return 0;
case PN26G0XA_STATUS_ECC_1_7_CORRECTED:
return 7; /* Return upper limit by convention */
case PN26G0XA_STATUS_ECC_8_CORRECTED:
return 8;
case PN26G0XA_STATUS_ECC_ERRORED:
return -EBADMSG;
default:
break;
}
return -EINVAL;
}
static const struct mtd_ooblayout_ops pn26g0xa_ooblayout = {
.ecc = pn26g0xa_ooblayout_ecc,
.free = pn26g0xa_ooblayout_free,
};
static const struct spinand_info paragon_spinand_table[] = {
SPINAND_INFO("PN26G01A",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe1),
NAND_MEMORG(1, 2048, 128, 64, 1024, 21, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&pn26g0xa_ooblayout,
pn26g0xa_ecc_get_status)),
SPINAND_INFO("PN26G02A",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe2),
NAND_MEMORG(1, 2048, 128, 64, 2048, 41, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&pn26g0xa_ooblayout,
pn26g0xa_ecc_get_status)),
};
static const struct spinand_manufacturer_ops paragon_spinand_manuf_ops = {
};
const struct spinand_manufacturer paragon_spinand_manufacturer = {
.id = SPINAND_MFR_PARAGON,
.name = "Paragon",
.chips = paragon_spinand_table,
.nchips = ARRAY_SIZE(paragon_spinand_table),
.ops = &paragon_spinand_manuf_ops,
};
| linux-master | drivers/mtd/nand/spi/paragon.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SuperH FLCTL nand controller
*
* Copyright (c) 2008 Renesas Solutions Corp.
* Copyright (c) 2008 Atom Create Engineering Co., Ltd.
*
* Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sh_flctl.h>
static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (section)
return -ERANGE;
oobregion->offset = 0;
oobregion->length = chip->ecc.bytes;
return 0;
}
static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section)
return -ERANGE;
oobregion->offset = 12;
oobregion->length = 4;
return 0;
}
static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
.ecc = flctl_4secc_ooblayout_sp_ecc,
.free = flctl_4secc_ooblayout_sp_free,
};
static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (section >= chip->ecc.steps)
return -ERANGE;
oobregion->offset = (section * 16) + 6;
oobregion->length = chip->ecc.bytes;
return 0;
}
static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (section >= chip->ecc.steps)
return -ERANGE;
oobregion->offset = section * 16;
oobregion->length = 6;
if (!section) {
oobregion->offset += 2;
oobregion->length -= 2;
}
return 0;
}
static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
.ecc = flctl_4secc_ooblayout_lp_ecc,
.free = flctl_4secc_ooblayout_lp_free,
};
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
static struct nand_bbt_descr flctl_4secc_smallpage = {
.offs = 11,
.len = 1,
.pattern = scan_ff_pattern,
};
static struct nand_bbt_descr flctl_4secc_largepage = {
.offs = 0,
.len = 2,
.pattern = scan_ff_pattern,
};
static void empty_fifo(struct sh_flctl *flctl)
{
writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
}
static void start_translation(struct sh_flctl *flctl)
{
writeb(TRSTRT, FLTRCR(flctl));
}
static void timeout_error(struct sh_flctl *flctl, const char *str)
{
dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
}
static void wait_completion(struct sh_flctl *flctl)
{
uint32_t timeout = LOOP_TIMEOUT_MAX;
while (timeout--) {
if (readb(FLTRCR(flctl)) & TREND) {
writeb(0x0, FLTRCR(flctl));
return;
}
udelay(1);
}
timeout_error(flctl, __func__);
writeb(0x0, FLTRCR(flctl));
}
static void flctl_dma_complete(void *param)
{
struct sh_flctl *flctl = param;
complete(&flctl->dma_complete);
}
static void flctl_release_dma(struct sh_flctl *flctl)
{
if (flctl->chan_fifo0_rx) {
dma_release_channel(flctl->chan_fifo0_rx);
flctl->chan_fifo0_rx = NULL;
}
if (flctl->chan_fifo0_tx) {
dma_release_channel(flctl->chan_fifo0_tx);
flctl->chan_fifo0_tx = NULL;
}
}
static void flctl_setup_dma(struct sh_flctl *flctl)
{
dma_cap_mask_t mask;
struct dma_slave_config cfg;
struct platform_device *pdev = flctl->pdev;
struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
int ret;
if (!pdata)
return;
if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
return;
/* We can either use DMA for both Tx and Rx or not use it at all */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
(void *)(uintptr_t)pdata->slave_id_fifo0_tx);
dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
flctl->chan_fifo0_tx);
if (!flctl->chan_fifo0_tx)
return;
memset(&cfg, 0, sizeof(cfg));
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = flctl->fifo;
cfg.src_addr = 0;
ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
if (ret < 0)
goto err;
flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
(void *)(uintptr_t)pdata->slave_id_fifo0_rx);
dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
flctl->chan_fifo0_rx);
if (!flctl->chan_fifo0_rx)
goto err;
cfg.direction = DMA_DEV_TO_MEM;
cfg.dst_addr = 0;
cfg.src_addr = flctl->fifo;
ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
if (ret < 0)
goto err;
init_completion(&flctl->dma_complete);
return;
err:
flctl_release_dma(flctl);
}
static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
uint32_t addr = 0;
if (column == -1) {
addr = page_addr; /* ERASE1 */
} else if (page_addr != -1) {
/* SEQIN, READ0, etc.. */
if (flctl->chip.options & NAND_BUSWIDTH_16)
column >>= 1;
if (flctl->page_size) {
addr = column & 0x0FFF;
addr |= (page_addr & 0xff) << 16;
addr |= ((page_addr >> 8) & 0xff) << 24;
/* bigger than 128 MB */
if (flctl->rw_ADRCNT == ADRCNT2_E) {
uint32_t addr2;
addr2 = (page_addr >> 16) & 0xff;
writel(addr2, FLADR2(flctl));
}
} else {
addr = column;
addr |= (page_addr & 0xff) << 8;
addr |= ((page_addr >> 8) & 0xff) << 16;
addr |= ((page_addr >> 16) & 0xff) << 24;
}
}
writel(addr, FLADR(flctl));
}
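/*
 * Worked example (large-page chip, 8-bit bus): column 0x10 in page 0x1234
 * yields FLADR = 0x0010 | (0x34 << 16) | (0x12 << 24) = 0x12340010;
 * FLADR2 carries the third row-address byte only on devices larger than
 * 128 MB (rw_ADRCNT == ADRCNT2_E).
 */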
static void wait_rfifo_ready(struct sh_flctl *flctl)
{
uint32_t timeout = LOOP_TIMEOUT_MAX;
while (timeout--) {
uint32_t val;
/* check FIFO */
val = readl(FLDTCNTR(flctl)) >> 16;
if (val & 0xFF)
return;
udelay(1);
}
timeout_error(flctl, __func__);
}
static void wait_wfifo_ready(struct sh_flctl *flctl)
{
uint32_t len, timeout = LOOP_TIMEOUT_MAX;
while (timeout--) {
/* check FIFO */
len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
if (len >= 4)
return;
udelay(1);
}
timeout_error(flctl, __func__);
}
static enum flctl_ecc_res_t wait_recfifo_ready
(struct sh_flctl *flctl, int sector_number)
{
uint32_t timeout = LOOP_TIMEOUT_MAX;
void __iomem *ecc_reg[4];
int i;
int state = FL_SUCCESS;
uint32_t data, size;
/*
* First this loop checks in FLDTCNTR whether we are ready to read out
* the OOB data. This is the case either when everything went fine
* without errors, or when the bottom part of the loop has corrected the
* errors (or marked them as uncorrectable) and the controller has had
* time to push the data into the FIFO.
*/
while (timeout--) {
/* check if all is ok and we can read out the OOB */
size = readl(FLDTCNTR(flctl)) >> 24;
if ((size & 0xFF) == 4)
return state;
/* check if a correction code has been calculated */
if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
/*
* either we wait for the fifo to be filled or a
* correction pattern is being generated
*/
udelay(1);
continue;
}
/* check for an uncorrectable error */
if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
/* check whether we are facing a non-empty (i.e. not erased) page */
for (i = 0; i < 512; i++) {
if (flctl->done_buff[i] != 0xff) {
state = FL_ERROR; /* can't correct */
break;
}
}
if (state == FL_SUCCESS)
dev_dbg(&flctl->pdev->dev,
"reading empty sector %d, ecc error ignored\n",
sector_number);
writel(0, FL4ECCCR(flctl));
continue;
}
/* start error correction */
ecc_reg[0] = FL4ECCRESULT0(flctl);
ecc_reg[1] = FL4ECCRESULT1(flctl);
ecc_reg[2] = FL4ECCRESULT2(flctl);
ecc_reg[3] = FL4ECCRESULT3(flctl);
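/*
 * Each FL4ECCRESULTn word encodes one correction: the upper 16 bits
 * give the byte index and the low byte the XOR correction pattern;
 * on large-page parts the index is offset by the 512-byte sector base.
 */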
for (i = 0; i < 3; i++) {
uint8_t org;
unsigned int index;
data = readl(ecc_reg[i]);
if (flctl->page_size)
index = (512 * sector_number) +
(data >> 16);
else
index = data >> 16;
org = flctl->done_buff[index];
flctl->done_buff[index] = org ^ (data & 0xFF);
}
state = FL_REPAIRABLE;
writel(0, FL4ECCCR(flctl));
}
timeout_error(flctl, __func__);
return FL_TIMEOUT; /* timeout */
}
static void wait_wecfifo_ready(struct sh_flctl *flctl)
{
uint32_t timeout = LOOP_TIMEOUT_MAX;
uint32_t len;
while (timeout--) {
/* check FLECFIFO */
len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
if (len >= 4)
return;
udelay(1);
}
timeout_error(flctl, __func__);
}
static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
int len, enum dma_data_direction dir)
{
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan;
enum dma_transfer_direction tr_dir;
dma_addr_t dma_addr;
dma_cookie_t cookie;
uint32_t reg;
int ret = 0;
unsigned long time_left;
if (dir == DMA_FROM_DEVICE) {
chan = flctl->chan_fifo0_rx;
tr_dir = DMA_DEV_TO_MEM;
} else {
chan = flctl->chan_fifo0_tx;
tr_dir = DMA_MEM_TO_DEV;
}
dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
if (!dma_mapping_error(chan->device->dev, dma_addr))
desc = dmaengine_prep_slave_single(chan, dma_addr, len,
tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (desc) {
reg = readl(FLINTDMACR(flctl));
reg |= DREQ0EN;
writel(reg, FLINTDMACR(flctl));
desc->callback = flctl_dma_complete;
desc->callback_param = flctl;
cookie = dmaengine_submit(desc);
if (dma_submit_error(cookie)) {
ret = dma_submit_error(cookie);
dev_warn(&flctl->pdev->dev,
"DMA submit failed, falling back to PIO\n");
goto out;
}
dma_async_issue_pending(chan);
} else {
/* DMA failed, fall back to PIO */
flctl_release_dma(flctl);
dev_warn(&flctl->pdev->dev,
"DMA failed, falling back to PIO\n");
ret = -EIO;
goto out;
}
time_left =
wait_for_completion_timeout(&flctl->dma_complete,
msecs_to_jiffies(3000));
if (time_left == 0) {
dmaengine_terminate_all(chan);
dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
ret = -ETIMEDOUT;
}
out:
reg = readl(FLINTDMACR(flctl));
reg &= ~DREQ0EN;
writel(reg, FLINTDMACR(flctl));
dma_unmap_single(chan->device->dev, dma_addr, len, dir);
/* ret == 0 is success */
return ret;
}
static void read_datareg(struct sh_flctl *flctl, int offset)
{
unsigned long data;
unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
wait_completion(flctl);
data = readl(FLDATAR(flctl));
*buf = le32_to_cpu(data);
}
static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
{
int i, len_4align;
unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
len_4align = (rlen + 3) / 4;
/* initiate DMA transfer */
if (flctl->chan_fifo0_rx && rlen >= 32 &&
!flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE))
goto convert; /* DMA success */
/* do polling transfer */
for (i = 0; i < len_4align; i++) {
wait_rfifo_ready(flctl);
buf[i] = readl(FLDTFIFO(flctl));
}
convert:
for (i = 0; i < len_4align; i++)
buf[i] = be32_to_cpu(buf[i]);
}
static enum flctl_ecc_res_t read_ecfiforeg
(struct sh_flctl *flctl, uint8_t *buff, int sector)
{
int i;
enum flctl_ecc_res_t res;
unsigned long *ecc_buf = (unsigned long *)buff;
res = wait_recfifo_ready(flctl, sector);
if (res != FL_ERROR) {
for (i = 0; i < 4; i++) {
ecc_buf[i] = readl(FLECFIFO(flctl));
ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
}
}
return res;
}
static void write_fiforeg(struct sh_flctl *flctl, int rlen,
unsigned int offset)
{
int i, len_4align;
unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
len_4align = (rlen + 3) / 4;
for (i = 0; i < len_4align; i++) {
wait_wfifo_ready(flctl);
writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
}
}
static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
unsigned int offset)
{
int i, len_4align;
unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
len_4align = (rlen + 3) / 4;
for (i = 0; i < len_4align; i++)
buf[i] = cpu_to_be32(buf[i]);
/* initiate DMA transfer */
if (flctl->chan_fifo0_tx && rlen >= 32 &&
!flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE))
return; /* DMA success */
/* do polling transfer */
for (i = 0; i < len_4align; i++) {
wait_wecfifo_ready(flctl);
writel(buf[i], FLECFIFO(flctl));
}
}
static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
uint32_t flcmdcr_val, addr_len_bytes = 0;
/* Set SNAND bit if page size is 2048byte */
if (flctl->page_size)
flcmncr_val |= SNAND_E;
else
flcmncr_val &= ~SNAND_E;
/* default FLCMDCR val */
flcmdcr_val = DOCMD1_E | DOADR_E;
/* Set for FLCMDCR */
switch (cmd) {
case NAND_CMD_ERASE1:
addr_len_bytes = flctl->erase_ADRCNT;
flcmdcr_val |= DOCMD2_E;
break;
case NAND_CMD_READ0:
case NAND_CMD_READOOB:
case NAND_CMD_RNDOUT:
addr_len_bytes = flctl->rw_ADRCNT;
flcmdcr_val |= CDSRC_E;
if (flctl->chip.options & NAND_BUSWIDTH_16)
flcmncr_val |= SEL_16BIT;
break;
case NAND_CMD_SEQIN:
/* This case means that the cmd is READ0, READ1 or READ00 */
flcmdcr_val &= ~DOADR_E; /* ONLY execute 1st cmd */
break;
case NAND_CMD_PAGEPROG:
addr_len_bytes = flctl->rw_ADRCNT;
flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
if (flctl->chip.options & NAND_BUSWIDTH_16)
flcmncr_val |= SEL_16BIT;
break;
case NAND_CMD_READID:
flcmncr_val &= ~SNAND_E;
flcmdcr_val |= CDSRC_E;
addr_len_bytes = ADRCNT_1;
break;
case NAND_CMD_STATUS:
case NAND_CMD_RESET:
flcmncr_val &= ~SNAND_E;
flcmdcr_val &= ~(DOADR_E | DOSR_E);
break;
default:
break;
}
/* Set address bytes parameter */
flcmdcr_val |= addr_len_bytes;
/* Now actually write */
writel(flcmncr_val, FLCMNCR(flctl));
writel(flcmdcr_val, FLCMDCR(flctl));
writel(flcmcdr_val, FLCMCDR(flctl));
}
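/*
 * Illustrative FLCMCDR encoding (as used by the callers below): the
 * second command byte lives in bits 15:8 and the first in bits 7:0, e.g.
 * a large-page read passes (NAND_CMD_READSTART << 8) | NAND_CMD_READ0.
 */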
static int flctl_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
return 0;
}
static int flctl_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
return nand_prog_page_end_op(chip);
}
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
int sector, page_sectors;
enum flctl_ecc_res_t ecc_result;
page_sectors = flctl->page_size ? 4 : 1;
set_cmd_regs(mtd, NAND_CMD_READ0,
(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
FLCMNCR(flctl));
writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
writel(page_addr << 2, FLADR(flctl));
empty_fifo(flctl);
start_translation(flctl);
for (sector = 0; sector < page_sectors; sector++) {
read_fiforeg(flctl, 512, 512 * sector);
ecc_result = read_ecfiforeg(flctl,
&flctl->done_buff[mtd->writesize + 16 * sector],
sector);
switch (ecc_result) {
case FL_REPAIRABLE:
dev_info(&flctl->pdev->dev,
"applied ecc on page 0x%x", page_addr);
mtd->ecc_stats.corrected++;
break;
case FL_ERROR:
dev_warn(&flctl->pdev->dev,
"page 0x%x contains corrupted data\n",
page_addr);
mtd->ecc_stats.failed++;
break;
default:
;
}
}
wait_completion(flctl);
writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
FLCMNCR(flctl));
}
static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
int page_sectors = flctl->page_size ? 4 : 1;
int i;
set_cmd_regs(mtd, NAND_CMD_READ0,
(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
empty_fifo(flctl);
for (i = 0; i < page_sectors; i++) {
set_addr(mtd, (512 + 16) * i + 512, page_addr);
writel(16, FLDTCNTR(flctl));
start_translation(flctl);
read_fiforeg(flctl, 16, 16 * i);
wait_completion(flctl);
}
}
static void execmd_write_page_sector(struct mtd_info *mtd)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
int page_addr = flctl->seqin_page_addr;
int sector, page_sectors;
page_sectors = flctl->page_size ? 4 : 1;
set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
empty_fifo(flctl);
writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
writel(page_addr << 2, FLADR(flctl));
start_translation(flctl);
for (sector = 0; sector < page_sectors; sector++) {
write_fiforeg(flctl, 512, 512 * sector);
write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
}
wait_completion(flctl);
writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
}
static void execmd_write_oob(struct mtd_info *mtd)
{
struct sh_flctl *flctl = mtd_to_flctl(mtd);
int page_addr = flctl->seqin_page_addr;
int sector, page_sectors;
page_sectors = flctl->page_size ? 4 : 1;
set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
for (sector = 0; sector < page_sectors; sector++) {
empty_fifo(flctl);
set_addr(mtd, sector * 528 + 512, page_addr);
writel(16, FLDTCNTR(flctl)); /* set read size */
start_translation(flctl);
write_fiforeg(flctl, 16, 16 * sector);
wait_completion(flctl);
}
}
static void flctl_cmdfunc(struct nand_chip *chip, unsigned int command,
int column, int page_addr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct sh_flctl *flctl = mtd_to_flctl(mtd);
uint32_t read_cmd = 0;
pm_runtime_get_sync(&flctl->pdev->dev);
flctl->read_bytes = 0;
if (command != NAND_CMD_PAGEPROG)
flctl->index = 0;
switch (command) {
case NAND_CMD_READ1:
case NAND_CMD_READ0:
if (flctl->hwecc) {
/* read page with hwecc */
execmd_read_page_sector(mtd, page_addr);
break;
}
if (flctl->page_size)
set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
| command);
else
set_cmd_regs(mtd, command, command);
set_addr(mtd, 0, page_addr);
flctl->read_bytes = mtd->writesize + mtd->oobsize;
if (flctl->chip.options & NAND_BUSWIDTH_16)
column >>= 1;
flctl->index += column;
goto read_normal_exit;
case NAND_CMD_READOOB:
if (flctl->hwecc) {
/* read page with hwecc */
execmd_read_oob(mtd, page_addr);
break;
}
if (flctl->page_size) {
set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
| NAND_CMD_READ0);
set_addr(mtd, mtd->writesize, page_addr);
} else {
set_cmd_regs(mtd, command, command);
set_addr(mtd, 0, page_addr);
}
flctl->read_bytes = mtd->oobsize;
goto read_normal_exit;
case NAND_CMD_RNDOUT:
if (flctl->hwecc)
break;
if (flctl->page_size)
set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
| command);
else
set_cmd_regs(mtd, command, command);
set_addr(mtd, column, 0);
flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
goto read_normal_exit;
case NAND_CMD_READID:
set_cmd_regs(mtd, command, command);
/* READID is always performed using an 8-bit bus */
if (flctl->chip.options & NAND_BUSWIDTH_16)
column <<= 1;
set_addr(mtd, column, 0);
flctl->read_bytes = 8;
writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
empty_fifo(flctl);
start_translation(flctl);
read_fiforeg(flctl, flctl->read_bytes, 0);
wait_completion(flctl);
break;
case NAND_CMD_ERASE1:
flctl->erase1_page_addr = page_addr;
break;
case NAND_CMD_ERASE2:
set_cmd_regs(mtd, NAND_CMD_ERASE1,
(command << 8) | NAND_CMD_ERASE1);
set_addr(mtd, -1, flctl->erase1_page_addr);
start_translation(flctl);
wait_completion(flctl);
break;
case NAND_CMD_SEQIN:
if (!flctl->page_size) {
/* output read command */
if (column >= mtd->writesize) {
column -= mtd->writesize;
read_cmd = NAND_CMD_READOOB;
} else if (column < 256) {
read_cmd = NAND_CMD_READ0;
} else {
column -= 256;
read_cmd = NAND_CMD_READ1;
}
}
flctl->seqin_column = column;
flctl->seqin_page_addr = page_addr;
flctl->seqin_read_cmd = read_cmd;
break;
case NAND_CMD_PAGEPROG:
empty_fifo(flctl);
if (!flctl->page_size) {
set_cmd_regs(mtd, NAND_CMD_SEQIN,
flctl->seqin_read_cmd);
set_addr(mtd, -1, -1);
writel(0, FLDTCNTR(flctl)); /* set 0 size */
start_translation(flctl);
wait_completion(flctl);
}
if (flctl->hwecc) {
/* write page with hwecc */
if (flctl->seqin_column == mtd->writesize)
execmd_write_oob(mtd);
else if (!flctl->seqin_column)
execmd_write_page_sector(mtd);
else
pr_err("Invalid address !?\n");
break;
}
set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
writel(flctl->index, FLDTCNTR(flctl)); /* set write size */
start_translation(flctl);
write_fiforeg(flctl, flctl->index, 0);
wait_completion(flctl);
break;
case NAND_CMD_STATUS:
set_cmd_regs(mtd, command, command);
set_addr(mtd, -1, -1);
flctl->read_bytes = 1;
writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
start_translation(flctl);
read_datareg(flctl, 0); /* read and end */
break;
case NAND_CMD_RESET:
set_cmd_regs(mtd, command, command);
set_addr(mtd, -1, -1);
writel(0, FLDTCNTR(flctl)); /* set 0 size */
start_translation(flctl);
wait_completion(flctl);
break;
default:
break;
}
goto runtime_exit;
read_normal_exit:
writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
empty_fifo(flctl);
start_translation(flctl);
read_fiforeg(flctl, flctl->read_bytes, 0);
wait_completion(flctl);
runtime_exit:
pm_runtime_put_sync(&flctl->pdev->dev);
return;
}
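/*
 * Worked example of the NAND_CMD_SEQIN pre-read command selection above
 * for a 512-byte small-page device (column values hypothetical):
 *
 *   column 100 -> NAND_CMD_READ0   (column unchanged)
 *   column 300 -> NAND_CMD_READ1   (column rebased to 300 - 256 = 44)
 *   column 512 -> NAND_CMD_READOOB (column rebased to 512 - 512 = 0)
 */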
static void flctl_select_chip(struct nand_chip *chip, int chipnr)
{
struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
int ret;
switch (chipnr) {
case -1:
flctl->flcmncr_base &= ~CE0_ENABLE;
pm_runtime_get_sync(&flctl->pdev->dev);
writel(flctl->flcmncr_base, FLCMNCR(flctl));
if (flctl->qos_request) {
dev_pm_qos_remove_request(&flctl->pm_qos);
flctl->qos_request = 0;
}
pm_runtime_put_sync(&flctl->pdev->dev);
break;
case 0:
flctl->flcmncr_base |= CE0_ENABLE;
if (!flctl->qos_request) {
ret = dev_pm_qos_add_request(&flctl->pdev->dev,
&flctl->pm_qos,
DEV_PM_QOS_RESUME_LATENCY,
100);
if (ret < 0)
dev_err(&flctl->pdev->dev,
"PM QoS request failed: %d\n", ret);
flctl->qos_request = 1;
}
if (flctl->holden) {
pm_runtime_get_sync(&flctl->pdev->dev);
writel(HOLDEN, FLHOLDCR(flctl));
pm_runtime_put_sync(&flctl->pdev->dev);
}
break;
default:
BUG();
}
}
static void flctl_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
memcpy(&flctl->done_buff[flctl->index], buf, len);
flctl->index += len;
}
static uint8_t flctl_read_byte(struct nand_chip *chip)
{
struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
uint8_t data;
data = flctl->done_buff[flctl->index];
flctl->index++;
return data;
}
static void flctl_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
memcpy(buf, &flctl->done_buff[flctl->index], len);
flctl->index += len;
}
static int flctl_chip_attach_chip(struct nand_chip *chip)
{
u64 targetsize = nanddev_target_size(&chip->base);
struct mtd_info *mtd = nand_to_mtd(chip);
struct sh_flctl *flctl = mtd_to_flctl(mtd);
/*
* NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
* Add the SEL_16BIT flag in flctl->flcmncr_base.
*/
if (chip->options & NAND_BUSWIDTH_16)
flctl->flcmncr_base |= SEL_16BIT;
if (mtd->writesize == 512) {
flctl->page_size = 0;
if (targetsize > (32 << 20)) {
/* bigger than 32 MiB */
flctl->rw_ADRCNT = ADRCNT_4;
flctl->erase_ADRCNT = ADRCNT_3;
} else if (targetsize > (2 << 16)) {
/* bigger than 128 KiB */
flctl->rw_ADRCNT = ADRCNT_3;
flctl->erase_ADRCNT = ADRCNT_2;
} else {
flctl->rw_ADRCNT = ADRCNT_2;
flctl->erase_ADRCNT = ADRCNT_1;
}
} else {
flctl->page_size = 1;
if (targetsize > (128 << 20)) {
/* bigger than 128 MiB */
flctl->rw_ADRCNT = ADRCNT2_E;
flctl->erase_ADRCNT = ADRCNT_3;
} else if (targetsize > (8 << 16)) {
/* bigger than 512 KiB */
flctl->rw_ADRCNT = ADRCNT_4;
flctl->erase_ADRCNT = ADRCNT_2;
} else {
flctl->rw_ADRCNT = ADRCNT_3;
flctl->erase_ADRCNT = ADRCNT_1;
}
}
if (flctl->hwecc) {
if (mtd->writesize == 512) {
mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
chip->badblock_pattern = &flctl_4secc_smallpage;
} else {
mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
chip->badblock_pattern = &flctl_4secc_largepage;
}
chip->ecc.size = 512;
chip->ecc.bytes = 10;
chip->ecc.strength = 4;
chip->ecc.read_page = flctl_read_page_hwecc;
chip->ecc.write_page = flctl_write_page_hwecc;
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
/* 4 symbols ECC enabled */
flctl->flcmncr_base |= _4ECCEN;
} else {
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
}
return 0;
}
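/*
 * A few worked examples of the address cycle selection above (chip
 * sizes hypothetical, thresholds taken from the branches themselves;
 * note that (2 << 16) is 128 KiB and (8 << 16) is 512 KiB):
 *
 *   512 B page, 64 MiB chip:  64 MiB > 32 MiB   -> ADRCNT_4 / ADRCNT_3
 *   512 B page, 16 MiB chip:  16 MiB > 128 KiB  -> ADRCNT_3 / ADRCNT_2
 *   2 KiB page, 256 MiB chip: 256 MiB > 128 MiB -> ADRCNT2_E / ADRCNT_3
 *   2 KiB page, 64 MiB chip:  64 MiB > 512 KiB  -> ADRCNT_4 / ADRCNT_2
 */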
static const struct nand_controller_ops flctl_nand_controller_ops = {
.attach_chip = flctl_chip_attach_chip,
};
static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
{
struct sh_flctl *flctl = dev_id;
dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
return IRQ_HANDLED;
}
struct flctl_soc_config {
unsigned long flcmncr_val;
unsigned has_hwecc:1;
unsigned use_holden:1;
};
static struct flctl_soc_config flctl_sh7372_config = {
.flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
.has_hwecc = 1,
.use_holden = 1,
};
static const struct of_device_id of_flctl_match[] = {
{ .compatible = "renesas,shmobile-flctl-sh7372",
.data = &flctl_sh7372_config },
{},
};
MODULE_DEVICE_TABLE(of, of_flctl_match);
static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
const struct flctl_soc_config *config;
struct sh_flctl_platform_data *pdata;
config = of_device_get_match_data(dev);
if (!config) {
dev_err(dev, "%s: no OF configuration attached\n", __func__);
return NULL;
}
pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
GFP_KERNEL);
if (!pdata)
return NULL;
/* set SoC specific options */
pdata->flcmncr_val = config->flcmncr_val;
pdata->has_hwecc = config->has_hwecc;
pdata->use_holden = config->use_holden;
return pdata;
}
static int flctl_probe(struct platform_device *pdev)
{
struct resource *res;
struct sh_flctl *flctl;
struct mtd_info *flctl_mtd;
struct nand_chip *nand;
struct sh_flctl_platform_data *pdata;
int ret;
int irq;
flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
if (!flctl)
return -ENOMEM;
flctl->reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(flctl->reg))
return PTR_ERR(flctl->reg);
flctl->fifo = res->start + 0x24; /* FLDTFIFO */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
"flste", flctl);
if (ret) {
dev_err(&pdev->dev, "request interrupt failed.\n");
return ret;
}
if (pdev->dev.of_node)
pdata = flctl_parse_dt(&pdev->dev);
else
pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "no setup data defined\n");
return -EINVAL;
}
platform_set_drvdata(pdev, flctl);
nand = &flctl->chip;
flctl_mtd = nand_to_mtd(nand);
nand_set_flash_node(nand, pdev->dev.of_node);
flctl_mtd->dev.parent = &pdev->dev;
flctl->pdev = pdev;
flctl->hwecc = pdata->has_hwecc;
flctl->holden = pdata->use_holden;
flctl->flcmncr_base = pdata->flcmncr_val;
flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;
/* 20 us command delay time */
nand->legacy.chip_delay = 20;
nand->legacy.read_byte = flctl_read_byte;
nand->legacy.write_buf = flctl_write_buf;
nand->legacy.read_buf = flctl_read_buf;
nand->legacy.select_chip = flctl_select_chip;
nand->legacy.cmdfunc = flctl_cmdfunc;
nand->legacy.set_features = nand_get_set_features_notsupp;
nand->legacy.get_features = nand_get_set_features_notsupp;
if (pdata->flcmncr_val & SEL_16BIT)
nand->options |= NAND_BUSWIDTH_16;
nand->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
flctl_setup_dma(flctl);
nand->legacy.dummy_controller.ops = &flctl_nand_controller_ops;
ret = nand_scan(nand, 1);
if (ret)
goto err_chip;
ret = mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
if (ret)
goto cleanup_nand;
return 0;
cleanup_nand:
nand_cleanup(nand);
err_chip:
flctl_release_dma(flctl);
pm_runtime_disable(&pdev->dev);
return ret;
}
static void flctl_remove(struct platform_device *pdev)
{
struct sh_flctl *flctl = platform_get_drvdata(pdev);
struct nand_chip *chip = &flctl->chip;
int ret;
flctl_release_dma(flctl);
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
pm_runtime_disable(&pdev->dev);
}
static struct platform_driver flctl_driver = {
.remove_new = flctl_remove,
.driver = {
.name = "sh_flctl",
.of_match_table = of_flctl_match,
},
};
module_platform_driver_probe(flctl_driver, flctl_probe);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("SuperH FLCTL driver");
MODULE_ALIAS("platform:sh_flctl");
| linux-master | drivers/mtd/nand/raw/sh_flctl.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2009 - Maxim Levitsky
* Common routines & support for xD format
*/
#include <linux/kernel.h>
#include <linux/mtd/rawnand.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include "sm_common.h"
static int oob_sm_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section > 1)
return -ERANGE;
oobregion->length = 3;
oobregion->offset = ((section + 1) * 8) - 3;
return 0;
}
static int oob_sm_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
switch (section) {
case 0:
/* reserved */
oobregion->offset = 0;
oobregion->length = 4;
break;
case 1:
/* LBA1 */
oobregion->offset = 6;
oobregion->length = 2;
break;
case 2:
/* LBA2 */
oobregion->offset = 11;
oobregion->length = 2;
break;
default:
return -ERANGE;
}
return 0;
}
static const struct mtd_ooblayout_ops oob_sm_ops = {
.ecc = oob_sm_ooblayout_ecc,
.free = oob_sm_ooblayout_free,
};
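/*
 * For reference, the regions the two callbacks above report for the
 * 16-byte OOB (derived purely from their arithmetic):
 *
 *   ECC:  section 0 -> offset ((0 + 1) * 8) - 3 =  5, length 3
 *         section 1 -> offset ((1 + 1) * 8) - 3 = 13, length 3
 *   free: section 0 -> offset  0, length 4 (reserved)
 *         section 1 -> offset  6, length 2 (LBA1)
 *         section 2 -> offset 11, length 2 (LBA2)
 */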
/* NOTE: This layout is not compatible with SmartMedia, */
/* because the 256 byte devices have a page dependent oob layout. */
/* However it does preserve the bad block markers. */
/* If you use smftl, it will bypass this and work correctly; */
/* if you do not, then you break SmartMedia compliance anyway. */
static int oob_sm_small_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
if (section)
return -ERANGE;
oobregion->length = 3;
oobregion->offset = 0;
return 0;
}
static int oob_sm_small_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
switch (section) {
case 0:
/* reserved */
oobregion->offset = 3;
oobregion->length = 2;
break;
case 1:
/* LBA1 */
oobregion->offset = 6;
oobregion->length = 2;
break;
default:
return -ERANGE;
}
return 0;
}
static const struct mtd_ooblayout_ops oob_sm_small_ops = {
.ecc = oob_sm_small_ooblayout_ecc,
.free = oob_sm_small_ooblayout_free,
};
static int sm_block_markbad(struct nand_chip *chip, loff_t ofs)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mtd_oob_ops ops = { };
struct sm_oob oob;
int ret;
memset(&oob, -1, SM_OOB_SIZE);
oob.block_status = 0x0F;
/* As long as this function is called on erase block boundaries,
it will work correctly for 256 byte NAND. */
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
ops.oobbuf = (void *)&oob;
ops.datbuf = NULL;
ret = mtd_write_oob(mtd, ofs, &ops);
if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
pr_notice("sm_common: can't mark sector at %i as bad\n",
(int)ofs);
return -EIO;
}
return 0;
}
static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
LEGACY_ID_NAND("SmartMedia 2MiB 3,3V ROM", 0x5d, 2, SZ_8K, NAND_ROM),
LEGACY_ID_NAND("SmartMedia 4MiB 3,3V", 0xe3, 4, SZ_8K, 0),
LEGACY_ID_NAND("SmartMedia 4MiB 3,3/5V", 0xe5, 4, SZ_8K, 0),
LEGACY_ID_NAND("SmartMedia 4MiB 5V", 0x6b, 4, SZ_8K, 0),
LEGACY_ID_NAND("SmartMedia 4MiB 3,3V ROM", 0xd5, 4, SZ_8K, NAND_ROM),
LEGACY_ID_NAND("SmartMedia 8MiB 3,3V", 0xe6, 8, SZ_8K, 0),
LEGACY_ID_NAND("SmartMedia 8MiB 3,3V ROM", 0xd6, 8, SZ_8K, NAND_ROM),
LEGACY_ID_NAND("SmartMedia 16MiB 3,3V", 0x73, 16, SZ_16K, 0),
LEGACY_ID_NAND("SmartMedia 16MiB 3,3V ROM", 0x57, 16, SZ_16K, NAND_ROM),
LEGACY_ID_NAND("SmartMedia 32MiB 3,3V", 0x75, 32, SZ_16K, 0),
LEGACY_ID_NAND("SmartMedia 32MiB 3,3V ROM", 0x58, 32, SZ_16K, NAND_ROM),
LEGACY_ID_NAND("SmartMedia 64MiB 3,3V", 0x76, 64, SZ_16K, 0),
LEGACY_ID_NAND("SmartMedia 64MiB 3,3V ROM", 0xd9, 64, SZ_16K, NAND_ROM),
LEGACY_ID_NAND("SmartMedia 128MiB 3,3V", 0x79, 128, SZ_16K, 0),
LEGACY_ID_NAND("SmartMedia 128MiB 3,3V ROM", 0xda, 128, SZ_16K, NAND_ROM),
LEGACY_ID_NAND("SmartMedia 256MiB 3, 3V", 0x71, 256, SZ_16K, 0),
LEGACY_ID_NAND("SmartMedia 256MiB 3,3V ROM", 0x5b, 256, SZ_16K, NAND_ROM),
{NULL}
};
static struct nand_flash_dev nand_xd_flash_ids[] = {
LEGACY_ID_NAND("xD 16MiB 3,3V", 0x73, 16, SZ_16K, 0),
LEGACY_ID_NAND("xD 32MiB 3,3V", 0x75, 32, SZ_16K, 0),
LEGACY_ID_NAND("xD 64MiB 3,3V", 0x76, 64, SZ_16K, 0),
LEGACY_ID_NAND("xD 128MiB 3,3V", 0x79, 128, SZ_16K, 0),
LEGACY_ID_NAND("xD 256MiB 3,3V", 0x71, 256, SZ_16K, NAND_BROKEN_XD),
LEGACY_ID_NAND("xD 512MiB 3,3V", 0xdc, 512, SZ_16K, NAND_BROKEN_XD),
LEGACY_ID_NAND("xD 1GiB 3,3V", 0xd3, 1024, SZ_16K, NAND_BROKEN_XD),
LEGACY_ID_NAND("xD 2GiB 3,3V", 0xd5, 2048, SZ_16K, NAND_BROKEN_XD),
{NULL}
};
static int sm_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
/* Bad block marker position */
chip->badblockpos = 0x05;
chip->badblockbits = 7;
chip->legacy.block_markbad = sm_block_markbad;
/* ECC layout */
if (mtd->writesize == SM_SECTOR_SIZE)
mtd_set_ooblayout(mtd, &oob_sm_ops);
else if (mtd->writesize == SM_SMALL_PAGE)
mtd_set_ooblayout(mtd, &oob_sm_small_ops);
else
return -ENODEV;
return 0;
}
static const struct nand_controller_ops sm_controller_ops = {
.attach_chip = sm_attach_chip,
};
int sm_register_device(struct mtd_info *mtd, int smartmedia)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_flash_dev *flash_ids;
int ret;
chip->options |= NAND_SKIP_BBTSCAN;
/* Scan for card properties */
chip->legacy.dummy_controller.ops = &sm_controller_ops;
flash_ids = smartmedia ? nand_smartmedia_flash_ids : nand_xd_flash_ids;
ret = nand_scan_with_ids(chip, 1, flash_ids);
if (ret)
return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret)
nand_cleanup(chip);
return ret;
}
EXPORT_SYMBOL_GPL(sm_register_device);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <[email protected]>");
MODULE_DESCRIPTION("Common SmartMedia/xD functions");
| linux-master | drivers/mtd/nand/raw/sm_common.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Cadence NAND flash controller driver
*
* Copyright (C) 2019 Cadence
*
* Author: Piotr Sroka <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of_device.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
/*
* HPNFC can work in 3 modes:
* - PIO - can work in master or slave DMA
* - CDMA - needs Master DMA for accessing command descriptors.
* - Generic mode - can use only slave DMA.
* CDMA and PIO modes can be used to execute only base commands.
* Generic mode can be used to execute any command
* on NAND flash memory. The driver uses CDMA mode for
* block erasing, page reading and page programming.
* Generic mode is used for executing the rest of the commands.
*/
#define MAX_ADDRESS_CYC 6
#define MAX_ERASE_ADDRESS_CYC 3
#define MAX_DATA_SIZE 0xFFFC
#define DMA_DATA_SIZE_ALIGN 8
/* Register definition. */
/*
* Command register 0.
* Writing data to this register will initiate a new transaction
* of the NF controller.
*/
#define CMD_REG0 0x0000
/* Command type field mask. */
#define CMD_REG0_CT GENMASK(31, 30)
/* Command type CDMA. */
#define CMD_REG0_CT_CDMA 0uL
/* Command type generic. */
#define CMD_REG0_CT_GEN 3uL
/* Command thread number field mask. */
#define CMD_REG0_TN GENMASK(27, 24)
/* Command register 2. */
#define CMD_REG2 0x0008
/* Command register 3. */
#define CMD_REG3 0x000C
/* Pointer register to select which thread status will be selected. */
#define CMD_STATUS_PTR 0x0010
/* Command status register for selected thread. */
#define CMD_STATUS 0x0014
/* Interrupt status register. */
#define INTR_STATUS 0x0110
#define INTR_STATUS_SDMA_ERR BIT(22)
#define INTR_STATUS_SDMA_TRIGG BIT(21)
#define INTR_STATUS_UNSUPP_CMD BIT(19)
#define INTR_STATUS_DDMA_TERR BIT(18)
#define INTR_STATUS_CDMA_TERR BIT(17)
#define INTR_STATUS_CDMA_IDL BIT(16)
/* Interrupt enable register. */
#define INTR_ENABLE 0x0114
#define INTR_ENABLE_INTR_EN BIT(31)
#define INTR_ENABLE_SDMA_ERR_EN BIT(22)
#define INTR_ENABLE_SDMA_TRIGG_EN BIT(21)
#define INTR_ENABLE_UNSUPP_CMD_EN BIT(19)
#define INTR_ENABLE_DDMA_TERR_EN BIT(18)
#define INTR_ENABLE_CDMA_TERR_EN BIT(17)
#define INTR_ENABLE_CDMA_IDLE_EN BIT(16)
/* Controller internal state. */
#define CTRL_STATUS 0x0118
#define CTRL_STATUS_INIT_COMP BIT(9)
#define CTRL_STATUS_CTRL_BUSY BIT(8)
/* Command Engine threads state. */
#define TRD_STATUS 0x0120
/* Command Engine interrupt thread error status. */
#define TRD_ERR_INT_STATUS 0x0128
/* Command Engine interrupt thread error enable. */
#define TRD_ERR_INT_STATUS_EN 0x0130
/* Command Engine interrupt thread complete status. */
#define TRD_COMP_INT_STATUS 0x0138
/*
* Transfer config 0 register.
* Configures data transfer parameters.
*/
#define TRAN_CFG_0 0x0400
/* Offset value from the beginning of the page. */
#define TRAN_CFG_0_OFFSET GENMASK(31, 16)
/* Number of sectors to transfer within a single NF device's page. */
#define TRAN_CFG_0_SEC_CNT GENMASK(7, 0)
/*
* Transfer config 1 register.
* Configures data transfer parameters.
*/
#define TRAN_CFG_1 0x0404
/* Size of last data sector. */
#define TRAN_CFG_1_LAST_SEC_SIZE GENMASK(31, 16)
/* Size of a non-last data sector. */
#define TRAN_CFG_1_SECTOR_SIZE GENMASK(15, 0)
/* ECC engine configuration register 0. */
#define ECC_CONFIG_0 0x0428
/* Correction strength. */
#define ECC_CONFIG_0_CORR_STR GENMASK(10, 8)
/* Enable erased pages detection mechanism. */
#define ECC_CONFIG_0_ERASE_DET_EN BIT(1)
/* Enable controller ECC check bits generation and correction. */
#define ECC_CONFIG_0_ECC_EN BIT(0)
/* ECC engine configuration register 1. */
#define ECC_CONFIG_1 0x042C
/* Multiplane settings register. */
#define MULTIPLANE_CFG 0x0434
/* Cache operation settings. */
#define CACHE_CFG 0x0438
/* DMA settings register. */
#define DMA_SETINGS 0x043C
/* Enable SDMA error reporting on access to an unprepared slave DMA interface. */
#define DMA_SETINGS_SDMA_ERR_RSP BIT(17)
/* Transferred data block size for the slave DMA module. */
#define SDMA_SIZE 0x0440
/*
* Thread number associated with the transferred data block
* for the slave DMA module.
*/
#define SDMA_TRD_NUM 0x0444
/* Thread number mask. */
#define SDMA_TRD_NUM_SDMA_TRD GENMASK(2, 0)
#define CONTROL_DATA_CTRL 0x0494
/* Control data size mask. */
#define CONTROL_DATA_CTRL_SIZE GENMASK(15, 0)
#define CTRL_VERSION 0x800
#define CTRL_VERSION_REV GENMASK(7, 0)
/* Available hardware features of the controller. */
#define CTRL_FEATURES 0x804
/* Support for NV-DDR2/3 work mode. */
#define CTRL_FEATURES_NVDDR_2_3 BIT(28)
/* Support for NV-DDR work mode. */
#define CTRL_FEATURES_NVDDR BIT(27)
/* Support for asynchronous work mode. */
#define CTRL_FEATURES_ASYNC BIT(26)
/* Number of banks supported by the hardware. */
#define CTRL_FEATURES_N_BANKS GENMASK(25, 24)
/* Slave and Master DMA data width. */
#define CTRL_FEATURES_DMA_DWITH64 BIT(21)
/* Availability of the Control Data feature. */
#define CTRL_FEATURES_CONTROL_DATA BIT(10)
/* BCH Engine identification register 0 - correction strengths. */
#define BCH_CFG_0 0x838
#define BCH_CFG_0_CORR_CAP_0 GENMASK(7, 0)
#define BCH_CFG_0_CORR_CAP_1 GENMASK(15, 8)
#define BCH_CFG_0_CORR_CAP_2 GENMASK(23, 16)
#define BCH_CFG_0_CORR_CAP_3 GENMASK(31, 24)
/* BCH Engine identification register 1 - correction strengths. */
#define BCH_CFG_1 0x83C
#define BCH_CFG_1_CORR_CAP_4 GENMASK(7, 0)
#define BCH_CFG_1_CORR_CAP_5 GENMASK(15, 8)
#define BCH_CFG_1_CORR_CAP_6 GENMASK(23, 16)
#define BCH_CFG_1_CORR_CAP_7 GENMASK(31, 24)
/* BCH Engine identification register 2 - sector sizes. */
#define BCH_CFG_2 0x840
#define BCH_CFG_2_SECT_0 GENMASK(15, 0)
#define BCH_CFG_2_SECT_1 GENMASK(31, 16)
/* BCH Engine identification register 3. */
#define BCH_CFG_3 0x844
#define BCH_CFG_3_METADATA_SIZE GENMASK(23, 16)
/* Ready/Busy# line status. */
#define RBN_SETINGS 0x1004
/* Common settings. */
#define COMMON_SET 0x1008
/* 16 bit device connected to the NAND Flash interface. */
#define COMMON_SET_DEVICE_16BIT BIT(8)
/* Skip_bytes registers. */
#define SKIP_BYTES_CONF 0x100C
#define SKIP_BYTES_MARKER_VALUE GENMASK(31, 16)
#define SKIP_BYTES_NUM_OF_BYTES GENMASK(7, 0)
#define SKIP_BYTES_OFFSET 0x1010
#define SKIP_BYTES_OFFSET_VALUE GENMASK(23, 0)
/* Timings configuration. */
#define ASYNC_TOGGLE_TIMINGS 0x101c
#define ASYNC_TOGGLE_TIMINGS_TRH GENMASK(28, 24)
#define ASYNC_TOGGLE_TIMINGS_TRP GENMASK(20, 16)
#define ASYNC_TOGGLE_TIMINGS_TWH GENMASK(12, 8)
#define ASYNC_TOGGLE_TIMINGS_TWP GENMASK(4, 0)
#define TIMINGS0 0x1024
#define TIMINGS0_TADL GENMASK(31, 24)
#define TIMINGS0_TCCS GENMASK(23, 16)
#define TIMINGS0_TWHR GENMASK(15, 8)
#define TIMINGS0_TRHW GENMASK(7, 0)
#define TIMINGS1 0x1028
#define TIMINGS1_TRHZ GENMASK(31, 24)
#define TIMINGS1_TWB GENMASK(23, 16)
#define TIMINGS1_TVDLY GENMASK(7, 0)
#define TIMINGS2 0x102c
#define TIMINGS2_TFEAT GENMASK(25, 16)
#define TIMINGS2_CS_HOLD_TIME GENMASK(13, 8)
#define TIMINGS2_CS_SETUP_TIME GENMASK(5, 0)
/* Configuration of the resynchronization of slave DLL of PHY. */
#define DLL_PHY_CTRL 0x1034
#define DLL_PHY_CTRL_DLL_RST_N BIT(24)
#define DLL_PHY_CTRL_EXTENDED_WR_MODE BIT(17)
#define DLL_PHY_CTRL_EXTENDED_RD_MODE BIT(16)
#define DLL_PHY_CTRL_RS_HIGH_WAIT_CNT GENMASK(11, 8)
#define DLL_PHY_CTRL_RS_IDLE_CNT GENMASK(7, 0)
/* Register controlling DQ related timing. */
#define PHY_DQ_TIMING 0x2000
/* Register controlling DQS related timing. */
#define PHY_DQS_TIMING 0x2004
#define PHY_DQS_TIMING_DQS_SEL_OE_END GENMASK(3, 0)
#define PHY_DQS_TIMING_PHONY_DQS_SEL BIT(16)
#define PHY_DQS_TIMING_USE_PHONY_DQS BIT(20)
/* Register controlling the gate and loopback control related timing. */
#define PHY_GATE_LPBK_CTRL 0x2008
#define PHY_GATE_LPBK_CTRL_RDS GENMASK(24, 19)
/* Register holds the control for the master DLL logic. */
#define PHY_DLL_MASTER_CTRL 0x200C
#define PHY_DLL_MASTER_CTRL_BYPASS_MODE BIT(23)
/* Register holds the control for the slave DLL logic. */
#define PHY_DLL_SLAVE_CTRL 0x2010
/* This register handles the global control settings for the PHY. */
#define PHY_CTRL 0x2080
#define PHY_CTRL_SDR_DQS BIT(14)
#define PHY_CTRL_PHONY_DQS GENMASK(9, 4)
/*
* This register handles the global control settings
* for the termination selects for reads.
*/
#define PHY_TSEL 0x2084
/* Generic command layout. */
#define GCMD_LAY_CS GENMASK_ULL(11, 8)
/*
* This bit informs the minicontroller if it has to wait for tWB
* after sending the last CMD/ADDR/DATA in the sequence.
*/
#define GCMD_LAY_TWB BIT_ULL(6)
/* Type of generic instruction. */
#define GCMD_LAY_INSTR GENMASK_ULL(5, 0)
/* Generic CMD sequence type. */
#define GCMD_LAY_INSTR_CMD 0
/* Generic ADDR sequence type. */
#define GCMD_LAY_INSTR_ADDR 1
/* Generic data transfer sequence type. */
#define GCMD_LAY_INSTR_DATA 2
/* Input part of generic command type of input is command. */
#define GCMD_LAY_INPUT_CMD GENMASK_ULL(23, 16)
/* Generic command address sequence - address fields. */
#define GCMD_LAY_INPUT_ADDR GENMASK_ULL(63, 16)
/* Generic command address sequence - address size. */
#define GCMD_LAY_INPUT_ADDR_SIZE GENMASK_ULL(13, 11)
/* Transfer direction field of generic command data sequence. */
#define GCMD_DIR BIT_ULL(11)
/* Read transfer direction of generic command data sequence. */
#define GCMD_DIR_READ 0
/* Write transfer direction of generic command data sequence. */
#define GCMD_DIR_WRITE 1
/* ECC enabled flag of generic command data sequence - ECC enabled. */
#define GCMD_ECC_EN BIT_ULL(12)
/* Generic command data sequence - sector size. */
#define GCMD_SECT_SIZE GENMASK_ULL(31, 16)
/* Generic command data sequence - sector count. */
#define GCMD_SECT_CNT GENMASK_ULL(39, 32)
/* Generic command data sequence - last sector size. */
#define GCMD_LAST_SIZE GENMASK_ULL(55, 40)
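/*
 * A minimal sketch (illustrative, not a quote of the sequence builders
 * below) of how these fields compose into one 64-bit generic command
 * word, here for a plain reset opcode:
 *
 *   u64 mini_ctrl_cmd = FIELD_PREP(GCMD_LAY_INSTR, GCMD_LAY_INSTR_CMD) |
 *                       GCMD_LAY_TWB |
 *                       FIELD_PREP(GCMD_LAY_INPUT_CMD, NAND_CMD_RESET);
 *
 * cadence_nand_generic_cmd_send() then ORs in the chip select through
 * GCMD_LAY_CS and splits the word into CMD_REG2 (low 32 bits) and
 * CMD_REG3 (high 32 bits).
 */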
/* CDMA descriptor fields. */
/* Erase command type of CDMA descriptor. */
#define CDMA_CT_ERASE 0x1000
/* Program page command type of CDMA descriptor. */
#define CDMA_CT_WR 0x2100
/* Read page command type of CDMA descriptor. */
#define CDMA_CT_RD 0x2200
/* Flash pointer memory shift. */
#define CDMA_CFPTR_MEM_SHIFT 24
/* Flash pointer memory mask. */
#define CDMA_CFPTR_MEM GENMASK(26, 24)
/*
* Command DMA descriptor flags. If set causes issue interrupt after
* the completion of descriptor processing.
*/
#define CDMA_CF_INT BIT(8)
/*
* Command DMA descriptor flags - the next descriptor
* address field is valid and descriptor processing should continue.
*/
#define CDMA_CF_CONT BIT(9)
/* DMA master flag of command DMA descriptor. */
#define CDMA_CF_DMA_MASTER BIT(10)
/* Operation complete status of command descriptor. */
#define CDMA_CS_COMP BIT(15)
/* Command descriptor status - operation fail. */
#define CDMA_CS_FAIL BIT(14)
/* Command descriptor status - page erased. */
#define CDMA_CS_ERP BIT(11)
/* Command descriptor status - timeout occurred. */
#define CDMA_CS_TOUT BIT(10)
/*
* Maximum amount of correction applied to one ECC sector.
* It is part of command descriptor status.
*/
#define CDMA_CS_MAXERR GENMASK(9, 2)
/* Command descriptor status - uncorrectable ECC error. */
#define CDMA_CS_UNCE BIT(1)
/* Command descriptor status - descriptor error. */
#define CDMA_CS_ERR BIT(0)
/* Status of operation - OK. */
#define STAT_OK 0
/* Status of operation - FAIL. */
#define STAT_FAIL 2
/* Status of operation - uncorrectable ECC error. */
#define STAT_ECC_UNCORR 3
/* Status of operation - page erased. */
#define STAT_ERASED 5
/* Status of operation - correctable ECC error. */
#define STAT_ECC_CORR 6
/* Status of operation - unexpected state. */
#define STAT_UNKNOWN 7
/* Status of operation - operation is not completed yet. */
#define STAT_BUSY 0xFF
#define BCH_MAX_NUM_CORR_CAPS 8
#define BCH_MAX_NUM_SECTOR_SIZES 2
struct cadence_nand_timings {
u32 async_toggle_timings;
u32 timings0;
u32 timings1;
u32 timings2;
u32 dll_phy_ctrl;
u32 phy_ctrl;
u32 phy_dqs_timing;
u32 phy_gate_lpbk_ctrl;
};
/* Command DMA descriptor. */
struct cadence_nand_cdma_desc {
/* Next descriptor address. */
u64 next_pointer;
/* Flash address is a 32-bit address comprising BANK and ROW ADDR. */
u32 flash_pointer;
/* Field appears in HPNFC version 13. */
u16 bank;
u16 rsvd0;
/* Operation the controller needs to perform. */
u16 command_type;
u16 rsvd1;
/* Flags for operation of this command. */
u16 command_flags;
u16 rsvd2;
/* System/host memory address required for data DMA commands. */
u64 memory_pointer;
/* Status of operation. */
u32 status;
u32 rsvd3;
/* Address pointer to sync buffer location. */
u64 sync_flag_pointer;
/* Controls the buffer sync mechanism. */
u32 sync_arguments;
u32 rsvd4;
/* Control data pointer. */
u64 ctrl_data_ptr;
};
/* Interrupt status. */
struct cadence_nand_irq_status {
/* Thread operation complete status. */
u32 trd_status;
/* Thread operation error. */
u32 trd_error;
/* Controller status. */
u32 status;
};
/* Cadence NAND flash controller capabilities obtained from driver data. */
struct cadence_nand_dt_devdata {
/* Skew value of the output signals of the NAND Flash interface. */
u32 if_skew;
/* Tells whether the slave DMA interface is connected to a DMA engine. */
unsigned int has_dma:1;
};
/* Cadence NAND flash controller capabilities read from registers. */
struct cdns_nand_caps {
/* Maximum number of banks supported by hardware. */
u8 max_banks;
/* Slave and Master DMA data width in bytes (4 or 8). */
u8 data_dma_width;
/* Control Data feature supported. */
bool data_control_supp;
/* Is PHY type DLL. */
bool is_phy_type_dll;
};
struct cdns_nand_ctrl {
struct device *dev;
struct nand_controller controller;
struct cadence_nand_cdma_desc *cdma_desc;
/* IP capability. */
const struct cadence_nand_dt_devdata *caps1;
struct cdns_nand_caps caps2;
u8 ctrl_rev;
dma_addr_t dma_cdma_desc;
u8 *buf;
u32 buf_size;
u8 curr_corr_str_idx;
/* Register interface. */
void __iomem *reg;
struct {
void __iomem *virt;
dma_addr_t dma;
} io;
int irq;
/* Interrupts that have happened. */
struct cadence_nand_irq_status irq_status;
/* Interrupts we are waiting for. */
struct cadence_nand_irq_status irq_mask;
struct completion complete;
/* Protect irq_mask and irq_status. */
spinlock_t irq_lock;
int ecc_strengths[BCH_MAX_NUM_CORR_CAPS];
struct nand_ecc_step_info ecc_stepinfos[BCH_MAX_NUM_SECTOR_SIZES];
struct nand_ecc_caps ecc_caps;
int curr_trans_type;
struct dma_chan *dmac;
u32 nf_clk_rate;
/*
* Estimated Board delay. The value includes the total
* round trip delay for the signals and is used for deciding on values
* associated with data read capture.
*/
u32 board_delay;
struct nand_chip *selected_chip;
unsigned long assigned_cs;
struct list_head chips;
u8 bch_metadata_size;
};
struct cdns_nand_chip {
struct cadence_nand_timings timings;
struct nand_chip chip;
u8 nsels;
struct list_head node;
/*
* Part of the OOB area of the NAND flash memory page.
* This part is available for the user to read or write.
*/
u32 avail_oob_size;
/* Sector size. There are a few sectors per mtd->writesize. */
u32 sector_size;
u32 sector_count;
/* Offset of BBM. */
u8 bbm_offs;
/* Number of bytes reserved for BBM. */
u8 bbm_len;
/* ECC strength index. */
u8 corr_str_idx;
u8 cs[];
};
struct ecc_info {
int (*calc_ecc_bytes)(int step_size, int strength);
int max_step_size;
};
static inline struct
cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip)
{
return container_of(chip, struct cdns_nand_chip, chip);
}
static inline struct
cdns_nand_ctrl *to_cdns_nand_ctrl(struct nand_controller *controller)
{
return container_of(controller, struct cdns_nand_ctrl, controller);
}
static bool
cadence_nand_dma_buf_ok(struct cdns_nand_ctrl *cdns_ctrl, const void *buf,
u32 buf_len)
{
u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
return buf && virt_addr_valid(buf) &&
likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) &&
likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN));
}
static int cadence_nand_wait_for_value(struct cdns_nand_ctrl *cdns_ctrl,
u32 reg_offset, u32 timeout_us,
u32 mask, bool is_clear)
{
u32 val;
int ret;
ret = readl_relaxed_poll_timeout(cdns_ctrl->reg + reg_offset,
val, !(val & mask) == is_clear,
10, timeout_us);
if (ret < 0) {
dev_err(cdns_ctrl->dev,
"Timeout while waiting for reg %x with mask %x is clear %d\n",
reg_offset, mask, is_clear);
}
return ret;
}
static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl *cdns_ctrl,
bool enable)
{
u32 reg;
if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
1000000,
CTRL_STATUS_CTRL_BUSY, true))
return -ETIMEDOUT;
reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
if (enable)
reg |= ECC_CONFIG_0_ECC_EN;
else
reg &= ~ECC_CONFIG_0_ECC_EN;
writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
return 0;
}
static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl *cdns_ctrl,
u8 corr_str_idx)
{
u32 reg;
if (cdns_ctrl->curr_corr_str_idx == corr_str_idx)
return;
reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
reg &= ~ECC_CONFIG_0_CORR_STR;
reg |= FIELD_PREP(ECC_CONFIG_0_CORR_STR, corr_str_idx);
writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
cdns_ctrl->curr_corr_str_idx = corr_str_idx;
}
static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl *cdns_ctrl,
u8 strength)
{
int i, corr_str_idx = -1;
for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
if (cdns_ctrl->ecc_strengths[i] == strength) {
corr_str_idx = i;
break;
}
}
return corr_str_idx;
}
static int cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl *cdns_ctrl,
u16 marker_value)
{
u32 reg;
if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
1000000,
CTRL_STATUS_CTRL_BUSY, true))
return -ETIMEDOUT;
reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
reg &= ~SKIP_BYTES_MARKER_VALUE;
reg |= FIELD_PREP(SKIP_BYTES_MARKER_VALUE,
marker_value);
writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
return 0;
}
static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl *cdns_ctrl,
u8 num_of_bytes,
u32 offset_value,
int enable)
{
u32 reg, skip_bytes_offset;
if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
1000000,
CTRL_STATUS_CTRL_BUSY, true))
return -ETIMEDOUT;
if (!enable) {
num_of_bytes = 0;
offset_value = 0;
}
reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
reg &= ~SKIP_BYTES_NUM_OF_BYTES;
reg |= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES,
num_of_bytes);
skip_bytes_offset = FIELD_PREP(SKIP_BYTES_OFFSET_VALUE,
offset_value);
writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
writel_relaxed(skip_bytes_offset, cdns_ctrl->reg + SKIP_BYTES_OFFSET);
return 0;
}
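/*
 * Usage note (illustrative, mirroring the page read/write paths below):
 * the skip-bytes mechanism is used to keep the factory bad block marker
 * intact, e.g.
 *
 *   cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
 *                                    mtd->writesize + cdns_chip->bbm_offs,
 *                                    1);
 *
 * while calling it with enable == 0, as the raw and BBM paths do,
 * clears both registers and disables skipping altogether.
 */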
/* Function enables/disables hardware detection of erased data. */
static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl *cdns_ctrl,
bool enable,
u8 bitflips_threshold)
{
u32 reg;
reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
if (enable)
reg |= ECC_CONFIG_0_ERASE_DET_EN;
else
reg &= ~ECC_CONFIG_0_ERASE_DET_EN;
writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
writel_relaxed(bitflips_threshold, cdns_ctrl->reg + ECC_CONFIG_1);
}
static int cadence_nand_set_access_width16(struct cdns_nand_ctrl *cdns_ctrl,
bool bit_bus16)
{
u32 reg;
if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
1000000,
CTRL_STATUS_CTRL_BUSY, true))
return -ETIMEDOUT;
reg = readl_relaxed(cdns_ctrl->reg + COMMON_SET);
if (!bit_bus16)
reg &= ~COMMON_SET_DEVICE_16BIT;
else
reg |= COMMON_SET_DEVICE_16BIT;
writel_relaxed(reg, cdns_ctrl->reg + COMMON_SET);
return 0;
}
static void
cadence_nand_clear_interrupt(struct cdns_nand_ctrl *cdns_ctrl,
struct cadence_nand_irq_status *irq_status)
{
writel_relaxed(irq_status->status, cdns_ctrl->reg + INTR_STATUS);
writel_relaxed(irq_status->trd_status,
cdns_ctrl->reg + TRD_COMP_INT_STATUS);
writel_relaxed(irq_status->trd_error,
cdns_ctrl->reg + TRD_ERR_INT_STATUS);
}
static void
cadence_nand_read_int_status(struct cdns_nand_ctrl *cdns_ctrl,
struct cadence_nand_irq_status *irq_status)
{
irq_status->status = readl_relaxed(cdns_ctrl->reg + INTR_STATUS);
irq_status->trd_status = readl_relaxed(cdns_ctrl->reg
+ TRD_COMP_INT_STATUS);
irq_status->trd_error = readl_relaxed(cdns_ctrl->reg
+ TRD_ERR_INT_STATUS);
}
static u32 irq_detected(struct cdns_nand_ctrl *cdns_ctrl,
struct cadence_nand_irq_status *irq_status)
{
cadence_nand_read_int_status(cdns_ctrl, irq_status);
return irq_status->status || irq_status->trd_status ||
irq_status->trd_error;
}
static void cadence_nand_reset_irq(struct cdns_nand_ctrl *cdns_ctrl)
{
unsigned long flags;
spin_lock_irqsave(&cdns_ctrl->irq_lock, flags);
memset(&cdns_ctrl->irq_status, 0, sizeof(cdns_ctrl->irq_status));
memset(&cdns_ctrl->irq_mask, 0, sizeof(cdns_ctrl->irq_mask));
spin_unlock_irqrestore(&cdns_ctrl->irq_lock, flags);
}
/*
* This is the interrupt service routine. It handles all interrupts
* sent to this device.
*/
static irqreturn_t cadence_nand_isr(int irq, void *dev_id)
{
struct cdns_nand_ctrl *cdns_ctrl = dev_id;
struct cadence_nand_irq_status irq_status;
irqreturn_t result = IRQ_NONE;
spin_lock(&cdns_ctrl->irq_lock);
if (irq_detected(cdns_ctrl, &irq_status)) {
/* Handle interrupt. */
/* First acknowledge it. */
cadence_nand_clear_interrupt(cdns_ctrl, &irq_status);
/* Status in the device context for someone to read. */
cdns_ctrl->irq_status.status |= irq_status.status;
cdns_ctrl->irq_status.trd_status |= irq_status.trd_status;
cdns_ctrl->irq_status.trd_error |= irq_status.trd_error;
/* Notify anyone who cares that it happened. */
complete(&cdns_ctrl->complete);
/* Tell the OS that we've handled this. */
result = IRQ_HANDLED;
}
spin_unlock(&cdns_ctrl->irq_lock);
return result;
}
static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl *cdns_ctrl,
struct cadence_nand_irq_status *irq_mask)
{
writel_relaxed(INTR_ENABLE_INTR_EN | irq_mask->status,
cdns_ctrl->reg + INTR_ENABLE);
writel_relaxed(irq_mask->trd_error,
cdns_ctrl->reg + TRD_ERR_INT_STATUS_EN);
}
static void
cadence_nand_wait_for_irq(struct cdns_nand_ctrl *cdns_ctrl,
struct cadence_nand_irq_status *irq_mask,
struct cadence_nand_irq_status *irq_status)
{
unsigned long timeout = msecs_to_jiffies(10000);
unsigned long time_left;
time_left = wait_for_completion_timeout(&cdns_ctrl->complete,
timeout);
*irq_status = cdns_ctrl->irq_status;
if (time_left == 0) {
/* Timeout error. */
dev_err(cdns_ctrl->dev, "timeout occurred:\n");
dev_err(cdns_ctrl->dev, "\tstatus = 0x%x, mask = 0x%x\n",
irq_status->status, irq_mask->status);
dev_err(cdns_ctrl->dev,
"\ttrd_status = 0x%x, trd_status mask = 0x%x\n",
irq_status->trd_status, irq_mask->trd_status);
dev_err(cdns_ctrl->dev,
"\t trd_error = 0x%x, trd_error mask = 0x%x\n",
irq_status->trd_error, irq_mask->trd_error);
}
}
/* Execute generic command on NAND controller. */
static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl *cdns_ctrl,
u8 chip_nr,
u64 mini_ctrl_cmd)
{
u32 mini_ctrl_cmd_l, mini_ctrl_cmd_h, reg;
mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_CS, chip_nr);
mini_ctrl_cmd_l = mini_ctrl_cmd & 0xFFFFFFFF;
mini_ctrl_cmd_h = mini_ctrl_cmd >> 32;
if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
1000000,
CTRL_STATUS_CTRL_BUSY, true))
return -ETIMEDOUT;
cadence_nand_reset_irq(cdns_ctrl);
writel_relaxed(mini_ctrl_cmd_l, cdns_ctrl->reg + CMD_REG2);
writel_relaxed(mini_ctrl_cmd_h, cdns_ctrl->reg + CMD_REG3);
/* Select generic command. */
reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN);
/* Thread number. */
reg |= FIELD_PREP(CMD_REG0_TN, 0);
/* Issue command. */
writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
return 0;
}
/* Wait for data on slave DMA interface. */
static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl *cdns_ctrl,
u8 *out_sdma_trd,
u32 *out_sdma_size)
{
struct cadence_nand_irq_status irq_mask, irq_status;
irq_mask.trd_status = 0;
irq_mask.trd_error = 0;
irq_mask.status = INTR_STATUS_SDMA_TRIGG
| INTR_STATUS_SDMA_ERR
| INTR_STATUS_UNSUPP_CMD;
cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
if (irq_status.status == 0) {
dev_err(cdns_ctrl->dev, "Timeout while waiting for SDMA\n");
return -ETIMEDOUT;
}
if (irq_status.status & INTR_STATUS_SDMA_TRIGG) {
*out_sdma_size = readl_relaxed(cdns_ctrl->reg + SDMA_SIZE);
*out_sdma_trd = readl_relaxed(cdns_ctrl->reg + SDMA_TRD_NUM);
*out_sdma_trd =
FIELD_GET(SDMA_TRD_NUM_SDMA_TRD, *out_sdma_trd);
} else {
dev_err(cdns_ctrl->dev, "SDMA error - irq_status %x\n",
irq_status.status);
return -EIO;
}
return 0;
}
static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
{
u32 reg;
reg = readl_relaxed(cdns_ctrl->reg + CTRL_FEATURES);
cdns_ctrl->caps2.max_banks = 1 << FIELD_GET(CTRL_FEATURES_N_BANKS, reg);
if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64, reg))
cdns_ctrl->caps2.data_dma_width = 8;
else
cdns_ctrl->caps2.data_dma_width = 4;
if (reg & CTRL_FEATURES_CONTROL_DATA)
cdns_ctrl->caps2.data_control_supp = true;
if (reg & (CTRL_FEATURES_NVDDR_2_3
| CTRL_FEATURES_NVDDR))
cdns_ctrl->caps2.is_phy_type_dll = true;
}
/* Prepare CDMA descriptor. */
static void
cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr,
dma_addr_t ctrl_data_ptr, u16 ctype)
{
struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;
memset(cdma_desc, 0, sizeof(struct cadence_nand_cdma_desc));
/* Set fields for one descriptor. */
cdma_desc->flash_pointer = flash_ptr;
if (cdns_ctrl->ctrl_rev >= 13)
cdma_desc->bank = nf_mem;
else
cdma_desc->flash_pointer |= (nf_mem << CDMA_CFPTR_MEM_SHIFT);
cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
cdma_desc->command_flags |= CDMA_CF_INT;
cdma_desc->memory_pointer = mem_ptr;
cdma_desc->status = 0;
cdma_desc->sync_flag_pointer = 0;
cdma_desc->sync_arguments = 0;
cdma_desc->command_type = ctype;
cdma_desc->ctrl_data_ptr = ctrl_data_ptr;
}
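/*
 * Worked example of the flash pointer packing above for controllers
 * older than revision 13 (values hypothetical): with nf_mem == 2 and
 * flash_ptr == 0x1000 the descriptor carries
 *
 *   flash_pointer = 0x1000 | (2 << CDMA_CFPTR_MEM_SHIFT) = 0x02001000
 *
 * whereas revision 13 and newer put the memory/bank number into the
 * dedicated 'bank' field instead.
 */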
static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
u32 desc_status)
{
if (desc_status & CDMA_CS_ERP)
return STAT_ERASED;
if (desc_status & CDMA_CS_UNCE)
return STAT_ECC_UNCORR;
if (desc_status & CDMA_CS_ERR) {
dev_err(cdns_ctrl->dev, ":CDMA desc error flag detected.\n");
return STAT_FAIL;
}
if (FIELD_GET(CDMA_CS_MAXERR, desc_status))
return STAT_ECC_CORR;
return STAT_FAIL;
}
static int cadence_nand_cdma_finish(struct cdns_nand_ctrl *cdns_ctrl)
{
struct cadence_nand_cdma_desc *desc_ptr = cdns_ctrl->cdma_desc;
u8 status = STAT_BUSY;
if (desc_ptr->status & CDMA_CS_FAIL) {
status = cadence_nand_check_desc_error(cdns_ctrl,
desc_ptr->status);
dev_err(cdns_ctrl->dev, ":CDMA error %x\n", desc_ptr->status);
} else if (desc_ptr->status & CDMA_CS_COMP) {
/* Descriptor finished with no errors. */
if (desc_ptr->command_flags & CDMA_CF_CONT) {
dev_info(cdns_ctrl->dev, "DMA unsupported flag is set\n");
status = STAT_UNKNOWN;
} else {
/* Last descriptor. */
status = STAT_OK;
}
}
return status;
}
static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
u8 thread)
{
u32 reg;
int status;
/* Wait for thread ready. */
status = cadence_nand_wait_for_value(cdns_ctrl, TRD_STATUS,
1000000,
BIT(thread), true);
if (status)
return status;
cadence_nand_reset_irq(cdns_ctrl);
reinit_completion(&cdns_ctrl->complete);
writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
cdns_ctrl->reg + CMD_REG2);
writel_relaxed(0, cdns_ctrl->reg + CMD_REG3);
/* Select CDMA mode. */
reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA);
/* Thread number. */
reg |= FIELD_PREP(CMD_REG0_TN, thread);
/* Issue command. */
writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
return 0;
}
/* Send CDMA command and wait for finish. */
static u32
cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl *cdns_ctrl,
u8 thread)
{
struct cadence_nand_irq_status irq_mask, irq_status = {0};
int status;
irq_mask.trd_status = BIT(thread);
irq_mask.trd_error = BIT(thread);
irq_mask.status = INTR_STATUS_CDMA_TERR;
cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
status = cadence_nand_cdma_send(cdns_ctrl, thread);
if (status)
return status;
cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
if (irq_status.status == 0 && irq_status.trd_status == 0 &&
irq_status.trd_error == 0) {
dev_err(cdns_ctrl->dev, "CDMA command timeout\n");
return -ETIMEDOUT;
}
if (irq_status.status & irq_mask.status) {
dev_err(cdns_ctrl->dev, "CDMA command failed\n");
return -EIO;
}
return 0;
}
/*
* ECC size depends on configured ECC strength and on maximum supported
* ECC step size.
*/
static int cadence_nand_calc_ecc_bytes(int max_step_size, int strength)
{
int nbytes = DIV_ROUND_UP(fls(8 * max_step_size) * strength, 8);
return ALIGN(nbytes, 2);
}
#define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \
static int \
cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \
int strength)\
{\
return cadence_nand_calc_ecc_bytes(max_step_size, strength);\
}
CADENCE_NAND_CALC_ECC_BYTES(256)
CADENCE_NAND_CALC_ECC_BYTES(512)
CADENCE_NAND_CALC_ECC_BYTES(1024)
CADENCE_NAND_CALC_ECC_BYTES(2048)
CADENCE_NAND_CALC_ECC_BYTES(4096)
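/*
 * Worked example (hypothetical strength/step values): for a 512-byte
 * maximum step size and strength 8,
 *
 *   fls(8 * 512)            = fls(4096) = 13 bits per parity symbol,
 *   DIV_ROUND_UP(13 * 8, 8) = 13 bytes,
 *   ALIGN(13, 2)            = 14 bytes of ECC per step,
 *
 * i.e. the 2-byte alignment can round the raw byte count up by one.
 */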
/* Function reads BCH capabilities. */
static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl)
{
struct nand_ecc_caps *ecc_caps = &cdns_ctrl->ecc_caps;
int max_step_size = 0, nstrengths, i;
u32 reg;
reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_3);
cdns_ctrl->bch_metadata_size = FIELD_GET(BCH_CFG_3_METADATA_SIZE, reg);
if (cdns_ctrl->bch_metadata_size < 4) {
dev_err(cdns_ctrl->dev,
"Driver needs at least 4 bytes of BCH meta data\n");
return -EIO;
}
reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0);
cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
cdns_ctrl->ecc_strengths[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2, reg);
cdns_ctrl->ecc_strengths[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3, reg);
reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_1);
cdns_ctrl->ecc_strengths[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4, reg);
cdns_ctrl->ecc_strengths[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5, reg);
cdns_ctrl->ecc_strengths[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6, reg);
cdns_ctrl->ecc_strengths[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7, reg);
reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_2);
cdns_ctrl->ecc_stepinfos[0].stepsize =
FIELD_GET(BCH_CFG_2_SECT_0, reg);
cdns_ctrl->ecc_stepinfos[1].stepsize =
FIELD_GET(BCH_CFG_2_SECT_1, reg);
nstrengths = 0;
for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
if (cdns_ctrl->ecc_strengths[i] != 0)
nstrengths++;
}
ecc_caps->nstepinfos = 0;
for (i = 0; i < BCH_MAX_NUM_SECTOR_SIZES; i++) {
/* ECC strengths are common for all step infos. */
cdns_ctrl->ecc_stepinfos[i].nstrengths = nstrengths;
cdns_ctrl->ecc_stepinfos[i].strengths =
cdns_ctrl->ecc_strengths;
if (cdns_ctrl->ecc_stepinfos[i].stepsize != 0)
ecc_caps->nstepinfos++;
if (cdns_ctrl->ecc_stepinfos[i].stepsize > max_step_size)
max_step_size = cdns_ctrl->ecc_stepinfos[i].stepsize;
}
ecc_caps->stepinfos = &cdns_ctrl->ecc_stepinfos[0];
switch (max_step_size) {
case 256:
ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_256;
break;
case 512:
ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_512;
break;
case 1024:
ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_1024;
break;
case 2048:
ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_2048;
break;
case 4096:
ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_4096;
break;
default:
dev_err(cdns_ctrl->dev,
"Unsupported sector size(ecc step size) %d\n",
max_step_size);
return -EIO;
}
return 0;
}
/* Hardware initialization. */
static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl)
{
int status;
u32 reg;
status = cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
1000000,
CTRL_STATUS_INIT_COMP, false);
if (status)
return status;
reg = readl_relaxed(cdns_ctrl->reg + CTRL_VERSION);
cdns_ctrl->ctrl_rev = FIELD_GET(CTRL_VERSION_REV, reg);
dev_info(cdns_ctrl->dev,
"%s: cadence nand controller version reg %x\n",
__func__, reg);
/* Disable cache and multiplane. */
writel_relaxed(0, cdns_ctrl->reg + MULTIPLANE_CFG);
writel_relaxed(0, cdns_ctrl->reg + CACHE_CFG);
/* Clear all interrupts. */
writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);
cadence_nand_get_caps(cdns_ctrl);
if (cadence_nand_read_bch_caps(cdns_ctrl))
return -EIO;
#ifndef CONFIG_64BIT
if (cdns_ctrl->caps2.data_dma_width == 8) {
dev_err(cdns_ctrl->dev,
"cannot access 64-bit dma on !64-bit architectures");
return -EIO;
}
#endif
/*
* Set I/O width access to 8 bit.
* During SW device discovery the width access
* is expected to be 8 bit.
*/
status = cadence_nand_set_access_width16(cdns_ctrl, false);
return status;
}
#define TT_MAIN_OOB_AREAS 2
#define TT_RAW_PAGE 3
#define TT_BBM 4
#define TT_MAIN_OOB_AREA_EXT 5
/* Prepare size of data to transfer. */
static void
cadence_nand_prepare_data_size(struct nand_chip *chip,
int transfer_type)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
u32 sec_size = 0, offset = 0, sec_cnt = 1;
u32 last_sec_size = cdns_chip->sector_size;
u32 data_ctrl_size = 0;
u32 reg = 0;
if (cdns_ctrl->curr_trans_type == transfer_type)
return;
switch (transfer_type) {
case TT_MAIN_OOB_AREA_EXT:
sec_cnt = cdns_chip->sector_count;
sec_size = cdns_chip->sector_size;
data_ctrl_size = cdns_chip->avail_oob_size;
break;
case TT_MAIN_OOB_AREAS:
sec_cnt = cdns_chip->sector_count;
last_sec_size = cdns_chip->sector_size
+ cdns_chip->avail_oob_size;
sec_size = cdns_chip->sector_size;
break;
case TT_RAW_PAGE:
last_sec_size = mtd->writesize + mtd->oobsize;
break;
case TT_BBM:
offset = mtd->writesize + cdns_chip->bbm_offs;
last_sec_size = 8;
break;
}
reg = 0;
reg |= FIELD_PREP(TRAN_CFG_0_OFFSET, offset);
reg |= FIELD_PREP(TRAN_CFG_0_SEC_CNT, sec_cnt);
writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_0);
reg = 0;
reg |= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE, last_sec_size);
reg |= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE, sec_size);
writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_1);
if (cdns_ctrl->caps2.data_control_supp) {
reg = readl_relaxed(cdns_ctrl->reg + CONTROL_DATA_CTRL);
reg &= ~CONTROL_DATA_CTRL_SIZE;
reg |= FIELD_PREP(CONTROL_DATA_CTRL_SIZE, data_ctrl_size);
writel_relaxed(reg, cdns_ctrl->reg + CONTROL_DATA_CTRL);
}
cdns_ctrl->curr_trans_type = transfer_type;
}
static int
cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr,
int page, void *buf, void *ctrl_dat, u32 buf_size,
u32 ctrl_dat_size, enum dma_data_direction dir,
bool with_ecc)
{
dma_addr_t dma_buf, dma_ctrl_dat = 0;
u8 thread_nr = chip_nr;
int status;
u16 ctype;
if (dir == DMA_FROM_DEVICE)
ctype = CDMA_CT_RD;
else
ctype = CDMA_CT_WR;
cadence_nand_set_ecc_enable(cdns_ctrl, with_ecc);
dma_buf = dma_map_single(cdns_ctrl->dev, buf, buf_size, dir);
if (dma_mapping_error(cdns_ctrl->dev, dma_buf)) {
dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
return -EIO;
}
if (ctrl_dat && ctrl_dat_size) {
dma_ctrl_dat = dma_map_single(cdns_ctrl->dev, ctrl_dat,
ctrl_dat_size, dir);
if (dma_mapping_error(cdns_ctrl->dev, dma_ctrl_dat)) {
dma_unmap_single(cdns_ctrl->dev, dma_buf,
buf_size, dir);
dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
return -EIO;
}
}
cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
dma_buf, dma_ctrl_dat, ctype);
status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
dma_unmap_single(cdns_ctrl->dev, dma_buf,
buf_size, dir);
if (ctrl_dat && ctrl_dat_size)
dma_unmap_single(cdns_ctrl->dev, dma_ctrl_dat,
ctrl_dat_size, dir);
if (status)
return status;
return cadence_nand_cdma_finish(cdns_ctrl);
}
static void cadence_nand_set_timings(struct cdns_nand_ctrl *cdns_ctrl,
struct cadence_nand_timings *t)
{
writel_relaxed(t->async_toggle_timings,
cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS);
writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0);
writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1);
writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2);
if (cdns_ctrl->caps2.is_phy_type_dll)
writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL);
writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL);
if (cdns_ctrl->caps2.is_phy_type_dll) {
writel_relaxed(0, cdns_ctrl->reg + PHY_TSEL);
writel_relaxed(2, cdns_ctrl->reg + PHY_DQ_TIMING);
writel_relaxed(t->phy_dqs_timing,
cdns_ctrl->reg + PHY_DQS_TIMING);
writel_relaxed(t->phy_gate_lpbk_ctrl,
cdns_ctrl->reg + PHY_GATE_LPBK_CTRL);
writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE,
cdns_ctrl->reg + PHY_DLL_MASTER_CTRL);
writel_relaxed(0, cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL);
}
}
static int cadence_nand_select_target(struct nand_chip *chip)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
if (chip == cdns_ctrl->selected_chip)
return 0;
if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
1000000,
CTRL_STATUS_CTRL_BUSY, true))
return -ETIMEDOUT;
cadence_nand_set_timings(cdns_ctrl, &cdns_chip->timings);
cadence_nand_set_ecc_strength(cdns_ctrl,
cdns_chip->corr_str_idx);
cadence_nand_set_erase_detection(cdns_ctrl, true,
chip->ecc.strength);
cdns_ctrl->curr_trans_type = -1;
cdns_ctrl->selected_chip = chip;
return 0;
}
static int cadence_nand_erase(struct nand_chip *chip, u32 page)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
int status;
u8 thread_nr = cdns_chip->cs[chip->cur_cs];
cadence_nand_cdma_desc_prepare(cdns_ctrl,
cdns_chip->cs[chip->cur_cs],
page, 0, 0,
CDMA_CT_ERASE);
status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
if (status) {
dev_err(cdns_ctrl->dev, "erase operation failed\n");
return -EIO;
}
status = cadence_nand_cdma_finish(cdns_ctrl);
if (status)
return status;
return 0;
}
static int cadence_nand_read_bbm(struct nand_chip *chip, int page, u8 *buf)
{
int status;
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
cadence_nand_prepare_data_size(chip, TT_BBM);
cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
/*
* Read only bad block marker from offset
* defined by a memory manufacturer.
*/
status = cadence_nand_cdma_transfer(cdns_ctrl,
cdns_chip->cs[chip->cur_cs],
page, cdns_ctrl->buf, NULL,
mtd->oobsize,
0, DMA_FROM_DEVICE, false);
if (status) {
dev_err(cdns_ctrl->dev, "read BBM failed\n");
return -EIO;
}
memcpy(buf + cdns_chip->bbm_offs, cdns_ctrl->buf, cdns_chip->bbm_len);
return 0;
}
static int cadence_nand_write_page(struct nand_chip *chip,
const u8 *buf, int oob_required,
int page)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
int status;
u16 marker_val = 0xFFFF;
status = cadence_nand_select_target(chip);
if (status)
return status;
cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
mtd->writesize
+ cdns_chip->bbm_offs,
1);
if (oob_required) {
marker_val = *(u16 *)(chip->oob_poi
+ cdns_chip->bbm_offs);
} else {
/* Set oob data to 0xFF. */
memset(cdns_ctrl->buf + mtd->writesize, 0xFF,
cdns_chip->avail_oob_size);
}
cadence_nand_set_skip_marker_val(cdns_ctrl, marker_val);
cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
cdns_ctrl->caps2.data_control_supp) {
u8 *oob;
if (oob_required)
oob = chip->oob_poi;
else
oob = cdns_ctrl->buf + mtd->writesize;
status = cadence_nand_cdma_transfer(cdns_ctrl,
cdns_chip->cs[chip->cur_cs],
page, (void *)buf, oob,
mtd->writesize,
cdns_chip->avail_oob_size,
DMA_TO_DEVICE, true);
if (status) {
dev_err(cdns_ctrl->dev, "write page failed\n");
return -EIO;
}
return 0;
}
if (oob_required) {
/* Transfer the data to the oob area. */
memcpy(cdns_ctrl->buf + mtd->writesize, chip->oob_poi,
cdns_chip->avail_oob_size);
}
memcpy(cdns_ctrl->buf, buf, mtd->writesize);
cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
return cadence_nand_cdma_transfer(cdns_ctrl,
cdns_chip->cs[chip->cur_cs],
page, cdns_ctrl->buf, NULL,
mtd->writesize
+ cdns_chip->avail_oob_size,
0, DMA_TO_DEVICE, true);
}
static int cadence_nand_write_oob(struct nand_chip *chip, int page)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
memset(cdns_ctrl->buf, 0xFF, mtd->writesize);
return cadence_nand_write_page(chip, cdns_ctrl->buf, 1, page);
}
static int cadence_nand_write_page_raw(struct nand_chip *chip,
const u8 *buf, int oob_required,
int page)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
int writesize = mtd->writesize;
int oobsize = mtd->oobsize;
int ecc_steps = chip->ecc.steps;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
void *tmp_buf = cdns_ctrl->buf;
int oob_skip = cdns_chip->bbm_len;
size_t size = writesize + oobsize;
int i, pos, len;
int status = 0;
status = cadence_nand_select_target(chip);
if (status)
return status;
/*
* Fill the buffer with 0xff first, except for a full page transfer.
* This simplifies the logic.
*/
if (!buf || !oob_required)
memset(tmp_buf, 0xff, size);
cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
/* Arrange the buffer for syndrome payload/ecc layout. */
if (buf) {
for (i = 0; i < ecc_steps; i++) {
pos = i * (ecc_size + ecc_bytes);
len = ecc_size;
if (pos >= writesize)
pos += oob_skip;
else if (pos + len > writesize)
len = writesize - pos;
memcpy(tmp_buf + pos, buf, len);
buf += len;
if (len < ecc_size) {
len = ecc_size - len;
memcpy(tmp_buf + writesize + oob_skip, buf,
len);
buf += len;
}
}
}
if (oob_required) {
const u8 *oob = chip->oob_poi;
u32 oob_data_offset = (cdns_chip->sector_count - 1) *
(cdns_chip->sector_size + chip->ecc.bytes)
+ cdns_chip->sector_size + oob_skip;
/* BBM at the beginning of the OOB area. */
memcpy(tmp_buf + writesize, oob, oob_skip);
/* OOB free. */
memcpy(tmp_buf + oob_data_offset, oob,
cdns_chip->avail_oob_size);
oob += cdns_chip->avail_oob_size;
/* OOB ECC. */
for (i = 0; i < ecc_steps; i++) {
pos = ecc_size + i * (ecc_size + ecc_bytes);
if (i == (ecc_steps - 1))
pos += cdns_chip->avail_oob_size;
len = ecc_bytes;
if (pos >= writesize)
pos += oob_skip;
else if (pos + len > writesize)
len = writesize - pos;
memcpy(tmp_buf + pos, oob, len);
oob += len;
if (len < ecc_bytes) {
len = ecc_bytes - len;
memcpy(tmp_buf + writesize + oob_skip, oob,
len);
oob += len;
}
}
}
cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
return cadence_nand_cdma_transfer(cdns_ctrl,
cdns_chip->cs[chip->cur_cs],
page, cdns_ctrl->buf, NULL,
mtd->writesize +
mtd->oobsize,
0, DMA_TO_DEVICE, false);
}
static int cadence_nand_write_oob_raw(struct nand_chip *chip,
int page)
{
return cadence_nand_write_page_raw(chip, NULL, true, page);
}
static int cadence_nand_read_page(struct nand_chip *chip,
u8 *buf, int oob_required, int page)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
int status = 0;
int ecc_err_count = 0;
status = cadence_nand_select_target(chip);
if (status)
return status;
cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
mtd->writesize
+ cdns_chip->bbm_offs, 1);
/*
 * If the data buffer can be accessed by DMA and the data_control
 * feature is supported then transfer the data and OOB directly.
 */
if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
cdns_ctrl->caps2.data_control_supp) {
u8 *oob;
if (oob_required)
oob = chip->oob_poi;
else
oob = cdns_ctrl->buf + mtd->writesize;
cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
status = cadence_nand_cdma_transfer(cdns_ctrl,
cdns_chip->cs[chip->cur_cs],
page, buf, oob,
mtd->writesize,
cdns_chip->avail_oob_size,
DMA_FROM_DEVICE, true);
/* Otherwise use bounce buffer. */
} else {
cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
status = cadence_nand_cdma_transfer(cdns_ctrl,
cdns_chip->cs[chip->cur_cs],
page, cdns_ctrl->buf,
NULL, mtd->writesize
+ cdns_chip->avail_oob_size,
0, DMA_FROM_DEVICE, true);
memcpy(buf, cdns_ctrl->buf, mtd->writesize);
if (oob_required)
memcpy(chip->oob_poi,
cdns_ctrl->buf + mtd->writesize,
mtd->oobsize);
}
switch (status) {
case STAT_ECC_UNCORR:
mtd->ecc_stats.failed++;
ecc_err_count++;
break;
case STAT_ECC_CORR:
ecc_err_count = FIELD_GET(CDMA_CS_MAXERR,
cdns_ctrl->cdma_desc->status);
mtd->ecc_stats.corrected += ecc_err_count;
break;
case STAT_ERASED:
case STAT_OK:
break;
default:
dev_err(cdns_ctrl->dev, "read page failed\n");
return -EIO;
}
if (oob_required)
if (cadence_nand_read_bbm(chip, page, chip->oob_poi))
return -EIO;
return ecc_err_count;
}
/* Reads OOB data from the device. */
static int cadence_nand_read_oob(struct nand_chip *chip, int page)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
return cadence_nand_read_page(chip, cdns_ctrl->buf, 1, page);
}
static int cadence_nand_read_page_raw(struct nand_chip *chip,
u8 *buf, int oob_required, int page)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
int oob_skip = cdns_chip->bbm_len;
int writesize = mtd->writesize;
int ecc_steps = chip->ecc.steps;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
void *tmp_buf = cdns_ctrl->buf;
int i, pos, len;
int status = 0;
status = cadence_nand_select_target(chip);
if (status)
return status;
cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
status = cadence_nand_cdma_transfer(cdns_ctrl,
cdns_chip->cs[chip->cur_cs],
page, cdns_ctrl->buf, NULL,
mtd->writesize
+ mtd->oobsize,
0, DMA_FROM_DEVICE, false);
switch (status) {
case STAT_ERASED:
case STAT_OK:
break;
default:
dev_err(cdns_ctrl->dev, "read raw page failed\n");
return -EIO;
}
/* Arrange the buffer for syndrome payload/ecc layout. */
if (buf) {
for (i = 0; i < ecc_steps; i++) {
pos = i * (ecc_size + ecc_bytes);
len = ecc_size;
if (pos >= writesize)
pos += oob_skip;
else if (pos + len > writesize)
len = writesize - pos;
memcpy(buf, tmp_buf + pos, len);
buf += len;
if (len < ecc_size) {
len = ecc_size - len;
memcpy(buf, tmp_buf + writesize + oob_skip,
len);
buf += len;
}
}
}
if (oob_required) {
u8 *oob = chip->oob_poi;
u32 oob_data_offset = (cdns_chip->sector_count - 1) *
(cdns_chip->sector_size + chip->ecc.bytes)
+ cdns_chip->sector_size + oob_skip;
/* OOB free. */
memcpy(oob, tmp_buf + oob_data_offset,
cdns_chip->avail_oob_size);
/* BBM at the beginning of the OOB area. */
memcpy(oob, tmp_buf + writesize, oob_skip);
oob += cdns_chip->avail_oob_size;
/* OOB ECC */
for (i = 0; i < ecc_steps; i++) {
pos = ecc_size + i * (ecc_size + ecc_bytes);
len = ecc_bytes;
if (i == (ecc_steps - 1))
pos += cdns_chip->avail_oob_size;
if (pos >= writesize)
pos += oob_skip;
else if (pos + len > writesize)
len = writesize - pos;
memcpy(oob, tmp_buf + pos, len);
oob += len;
if (len < ecc_bytes) {
len = ecc_bytes - len;
memcpy(oob, tmp_buf + writesize + oob_skip,
len);
oob += len;
}
}
}
return 0;
}
static int cadence_nand_read_oob_raw(struct nand_chip *chip,
int page)
{
return cadence_nand_read_page_raw(chip, NULL, true, page);
}
static void cadence_nand_slave_dma_transfer_finished(void *data)
{
struct completion *finished = data;
complete(finished);
}
static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
void *buf,
dma_addr_t dev_dma, size_t len,
enum dma_data_direction dir)
{
DECLARE_COMPLETION_ONSTACK(finished);
struct dma_chan *chan;
struct dma_device *dma_dev;
dma_addr_t src_dma, dst_dma, buf_dma;
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
chan = cdns_ctrl->dmac;
dma_dev = chan->device;
buf_dma = dma_map_single(dma_dev->dev, buf, len, dir);
if (dma_mapping_error(dma_dev->dev, buf_dma)) {
dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
goto err;
}
if (dir == DMA_FROM_DEVICE) {
src_dma = cdns_ctrl->io.dma;
dst_dma = buf_dma;
} else {
src_dma = buf_dma;
dst_dma = cdns_ctrl->io.dma;
}
tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(cdns_ctrl->dev, "Failed to prepare DMA memcpy\n");
goto err_unmap;
}
tx->callback = cadence_nand_slave_dma_transfer_finished;
tx->callback_param = &finished;
cookie = dmaengine_submit(tx);
if (dma_submit_error(cookie)) {
dev_err(cdns_ctrl->dev, "Failed to do DMA tx_submit\n");
goto err_unmap;
}
dma_async_issue_pending(cdns_ctrl->dmac);
wait_for_completion(&finished);
dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
return 0;
err_unmap:
dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
err:
dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
return -EIO;
}
static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl,
u8 *buf, int len)
{
u8 thread_nr = 0;
u32 sdma_size;
int status;
/* Wait until the slave DMA interface is ready for data transfer. */
status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
if (status)
return status;
if (!cdns_ctrl->caps1->has_dma) {
u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
int len_in_words = (data_dma_width == 4) ? len >> 2 : len >> 3;
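/*
 * Illustrative sizes only: len = 256 is moved as 64 32-bit
 * reads on a 4-byte data port, or as 32 64-bit reads on an
 * 8-byte port.
 */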
/* Read the word-aligned part of the data. */
if (data_dma_width == 4)
ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words);
#ifdef CONFIG_64BIT
else
readsq(cdns_ctrl->io.virt, buf, len_in_words);
#endif
if (sdma_size > len) {
int read_bytes = (data_dma_width == 4) ?
len_in_words << 2 : len_in_words << 3;
/* Read the remaining data from the slave DMA interface, if any. */
if (data_dma_width == 4)
ioread32_rep(cdns_ctrl->io.virt,
cdns_ctrl->buf,
sdma_size / 4 - len_in_words);
#ifdef CONFIG_64BIT
else
readsq(cdns_ctrl->io.virt, cdns_ctrl->buf,
sdma_size / 8 - len_in_words);
#endif
/* Copy the remaining bytes. */
memcpy(buf + read_bytes, cdns_ctrl->buf,
len - read_bytes);
}
return 0;
}
if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
status = cadence_nand_slave_dma_transfer(cdns_ctrl, buf,
cdns_ctrl->io.dma,
len, DMA_FROM_DEVICE);
if (status == 0)
return 0;
dev_warn(cdns_ctrl->dev,
"Slave DMA transfer failed. Try again using bounce buffer.");
}
/* If the DMA transfer is not possible or has failed, use the bounce buffer. */
status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
cdns_ctrl->io.dma,
sdma_size, DMA_FROM_DEVICE);
if (status) {
dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
return status;
}
memcpy(buf, cdns_ctrl->buf, len);
return 0;
}
static int cadence_nand_write_buf(struct cdns_nand_ctrl *cdns_ctrl,
const u8 *buf, int len)
{
u8 thread_nr = 0;
u32 sdma_size;
int status;
/* Wait until the slave DMA interface is ready for data transfer. */
status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
if (status)
return status;
if (!cdns_ctrl->caps1->has_dma) {
u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
int len_in_words = (data_dma_width == 4) ? len >> 2 : len >> 3;
if (data_dma_width == 4)
iowrite32_rep(cdns_ctrl->io.virt, buf, len_in_words);
#ifdef CONFIG_64BIT
else
writesq(cdns_ctrl->io.virt, buf, len_in_words);
#endif
if (sdma_size > len) {
int written_bytes = (data_dma_width == 4) ?
len_in_words << 2 : len_in_words << 3;
/* Copy the remaining bytes. */
memcpy(cdns_ctrl->buf, buf + written_bytes,
len - written_bytes);
/* Write all of the data expected by the NAND controller. */
if (data_dma_width == 4)
iowrite32_rep(cdns_ctrl->io.virt,
cdns_ctrl->buf,
sdma_size / 4 - len_in_words);
#ifdef CONFIG_64BIT
else
writesq(cdns_ctrl->io.virt, cdns_ctrl->buf,
sdma_size / 8 - len_in_words);
#endif
}
return 0;
}
if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
status = cadence_nand_slave_dma_transfer(cdns_ctrl, (void *)buf,
cdns_ctrl->io.dma,
len, DMA_TO_DEVICE);
if (status == 0)
return 0;
dev_warn(cdns_ctrl->dev,
"Slave DMA transfer failed. Try again using bounce buffer.");
}
/* If the DMA transfer is not possible or has failed, use the bounce buffer. */
memcpy(cdns_ctrl->buf, buf, len);
status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
cdns_ctrl->io.dma,
sdma_size, DMA_TO_DEVICE);
if (status)
dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
return status;
}
static int cadence_nand_force_byte_access(struct nand_chip *chip,
bool force_8bit)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
/*
 * Callers of this function do not verify if the NAND is using a 16-bit
 * or an 8-bit bus for normal operations, so we need to take care of
 * that here by leaving the configuration unchanged if the NAND does
 * not have the NAND_BUSWIDTH_16 flag set.
 */
if (!(chip->options & NAND_BUSWIDTH_16))
return 0;
return cadence_nand_set_access_width16(cdns_ctrl, !force_8bit);
}
static int cadence_nand_cmd_opcode(struct nand_chip *chip,
const struct nand_subop *subop)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
const struct nand_op_instr *instr;
unsigned int op_id = 0;
u64 mini_ctrl_cmd = 0;
int ret;
instr = &subop->instrs[op_id];
if (instr->delay_ns > 0)
mini_ctrl_cmd |= GCMD_LAY_TWB;
mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
GCMD_LAY_INSTR_CMD);
mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD,
instr->ctx.cmd.opcode);
ret = cadence_nand_generic_cmd_send(cdns_ctrl,
cdns_chip->cs[chip->cur_cs],
mini_ctrl_cmd);
if (ret)
dev_err(cdns_ctrl->dev, "send cmd %x failed\n",
instr->ctx.cmd.opcode);
return ret;
}
static int cadence_nand_cmd_address(struct nand_chip *chip,
const struct nand_subop *subop)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
const struct nand_op_instr *instr;
unsigned int op_id = 0;
u64 mini_ctrl_cmd = 0;
unsigned int offset, naddrs;
u64 address = 0;
const u8 *addrs;
int ret;
int i;
instr = &subop->instrs[op_id];
if (instr->delay_ns > 0)
mini_ctrl_cmd |= GCMD_LAY_TWB;
mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
GCMD_LAY_INSTR_ADDR);
offset = nand_subop_get_addr_start_off(subop, op_id);
naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
addrs = &instr->ctx.addr.addrs[offset];
for (i = 0; i < naddrs; i++)
address |= (u64)addrs[i] << (8 * i);
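/* E.g. (illustrative values) addrs = {0x04, 0x00, 0x12} packs to 0x120004. */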
mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR,
address);
mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE,
naddrs - 1);
ret = cadence_nand_generic_cmd_send(cdns_ctrl,
cdns_chip->cs[chip->cur_cs],
mini_ctrl_cmd);
if (ret)
dev_err(cdns_ctrl->dev, "send address %llx failed\n", address);
return ret;
}
static int cadence_nand_cmd_erase(struct nand_chip *chip,
const struct nand_subop *subop)
{
unsigned int op_id;
if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_ERASE1) {
int i;
const struct nand_op_instr *instr = NULL;
unsigned int offset, naddrs;
const u8 *addrs;
u32 page = 0;
instr = &subop->instrs[1];
offset = nand_subop_get_addr_start_off(subop, 1);
naddrs = nand_subop_get_num_addr_cyc(subop, 1);
addrs = &instr->ctx.addr.addrs[offset];
for (i = 0; i < naddrs; i++)
page |= (u32)addrs[i] << (8 * i);
return cadence_nand_erase(chip, page);
}
/*
 * If it is not an erase operation then handle the operation
 * by calling the exec_op function.
 */
for (op_id = 0; op_id < subop->ninstrs; op_id++) {
int ret;
const struct nand_operation nand_op = {
.cs = chip->cur_cs,
.instrs = &subop->instrs[op_id],
.ninstrs = 1};
ret = chip->controller->ops->exec_op(chip, &nand_op, false);
if (ret)
return ret;
}
return 0;
}
static int cadence_nand_cmd_data(struct nand_chip *chip,
const struct nand_subop *subop)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
const struct nand_op_instr *instr;
unsigned int offset, op_id = 0;
u64 mini_ctrl_cmd = 0;
int len = 0;
int ret;
instr = &subop->instrs[op_id];
if (instr->delay_ns > 0)
mini_ctrl_cmd |= GCMD_LAY_TWB;
mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
GCMD_LAY_INSTR_DATA);
if (instr->type == NAND_OP_DATA_OUT_INSTR)
mini_ctrl_cmd |= FIELD_PREP(GCMD_DIR,
GCMD_DIR_WRITE);
len = nand_subop_get_data_len(subop, op_id);
offset = nand_subop_get_data_start_off(subop, op_id);
mini_ctrl_cmd |= FIELD_PREP(GCMD_SECT_CNT, 1);
mini_ctrl_cmd |= FIELD_PREP(GCMD_LAST_SIZE, len);
if (instr->ctx.data.force_8bit) {
ret = cadence_nand_force_byte_access(chip, true);
if (ret) {
dev_err(cdns_ctrl->dev,
"cannot change byte access generic data cmd failed\n");
return ret;
}
}
ret = cadence_nand_generic_cmd_send(cdns_ctrl,
cdns_chip->cs[chip->cur_cs],
mini_ctrl_cmd);
if (ret) {
dev_err(cdns_ctrl->dev, "send generic data cmd failed\n");
return ret;
}
if (instr->type == NAND_OP_DATA_IN_INSTR) {
void *buf = instr->ctx.data.buf.in + offset;
ret = cadence_nand_read_buf(cdns_ctrl, buf, len);
} else {
const void *buf = instr->ctx.data.buf.out + offset;
ret = cadence_nand_write_buf(cdns_ctrl, buf, len);
}
if (ret) {
dev_err(cdns_ctrl->dev, "data transfer failed for generic command\n");
return ret;
}
if (instr->ctx.data.force_8bit) {
ret = cadence_nand_force_byte_access(chip, false);
if (ret) {
dev_err(cdns_ctrl->dev,
"cannot change byte access generic data cmd failed\n");
}
}
return ret;
}
static int cadence_nand_cmd_waitrdy(struct nand_chip *chip,
const struct nand_subop *subop)
{
int status;
unsigned int op_id = 0;
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
const struct nand_op_instr *instr = &subop->instrs[op_id];
u32 timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
status = cadence_nand_wait_for_value(cdns_ctrl, RBN_SETINGS,
timeout_us,
BIT(cdns_chip->cs[chip->cur_cs]),
false);
return status;
}
static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER(
NAND_OP_PARSER_PATTERN(
cadence_nand_cmd_erase,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC),
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
NAND_OP_PARSER_PATTERN(
cadence_nand_cmd_opcode,
NAND_OP_PARSER_PAT_CMD_ELEM(false)),
NAND_OP_PARSER_PATTERN(
cadence_nand_cmd_address,
NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)),
NAND_OP_PARSER_PATTERN(
cadence_nand_cmd_data,
NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)),
NAND_OP_PARSER_PATTERN(
cadence_nand_cmd_data,
NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)),
NAND_OP_PARSER_PATTERN(
cadence_nand_cmd_waitrdy,
NAND_OP_PARSER_PAT_WAITRDY_ELEM(false))
);
static int cadence_nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
if (!check_only) {
int status = cadence_nand_select_target(chip);
if (status)
return status;
}
return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op,
check_only);
}
static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
if (section)
return -ERANGE;
oobregion->offset = cdns_chip->bbm_len;
oobregion->length = cdns_chip->avail_oob_size
- cdns_chip->bbm_len;
return 0;
}
static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
if (section)
return -ERANGE;
oobregion->offset = cdns_chip->avail_oob_size;
oobregion->length = chip->ecc.total;
return 0;
}
static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = {
.free = cadence_nand_ooblayout_free,
.ecc = cadence_nand_ooblayout_ecc,
};
static int calc_cycl(u32 timing, u32 clock)
{
if (timing == 0 || clock == 0)
return 0;
if ((timing % clock) > 0)
return timing / clock;
else
return timing / clock - 1;
}
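/*
 * Illustrative values only: timing = 25 and clock = 10 return
 * 25 / 10 = 2 since there is a remainder, while timing = 20 returns
 * 20 / 10 - 1 = 1. The callers then use (cnt + 1) cycles, as in the
 * tRC/tWC calculations below, so both cases cover the requested time.
 */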
/* Calculate max data valid window. */
static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
u32 board_delay_skew_min, u32 ext_mode)
{
if (ext_mode == 0)
clk_period /= 2;
return (trp_cnt + 1) * clk_period + trhoh_min +
board_delay_skew_min;
}
/* Calculate data valid window. */
static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
u32 trea_max, u32 ext_mode)
{
if (ext_mode == 0)
clk_period /= 2;
return (trp_cnt + 1) * clk_period + trhoh_min - trea_max;
}
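/*
 * Illustrative numbers only: trp_cnt = 1, clk_period = 20000 ps,
 * trhoh_min = 15000 ps, trea_max = 25000 ps and ext_mode = 1 give
 * tdvw = 2 * 20000 + 15000 - 25000 = 30000 ps of data valid window.
 */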
static int
cadence_nand_setup_interface(struct nand_chip *chip, int chipnr,
const struct nand_interface_config *conf)
{
const struct nand_sdr_timings *sdr;
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
struct cadence_nand_timings *t = &cdns_chip->timings;
u32 reg;
u32 board_delay = cdns_ctrl->board_delay;
u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
cdns_ctrl->nf_clk_rate);
u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
u32 tfeat_cnt, trhz_cnt, tvdly_cnt;
u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt;
u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0;
u32 if_skew = cdns_ctrl->caps1->if_skew;
u32 board_delay_skew_min = board_delay - if_skew;
u32 board_delay_skew_max = board_delay + if_skew;
u32 dqs_sampl_res, phony_dqs_mod;
u32 tdvw, tdvw_min, tdvw_max;
u32 ext_rd_mode, ext_wr_mode;
u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0;
u32 sampling_point;
sdr = nand_get_sdr_timings(conf);
if (IS_ERR(sdr))
return PTR_ERR(sdr);
memset(t, 0, sizeof(*t));
/* Sampling point calculation. */
if (cdns_ctrl->caps2.is_phy_type_dll)
phony_dqs_mod = 2;
else
phony_dqs_mod = 1;
dqs_sampl_res = clk_period / phony_dqs_mod;
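/*
 * E.g. (illustrative) clk_period = 20000 ps with a DLL-type PHY
 * (phony_dqs_mod = 2) gives a 10000 ps sampling resolution.
 */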
tdvw_min = sdr->tREA_max + board_delay_skew_max;
/*
 * The idea of these calculations is to get the optimum values
 * for the tRP and tRH timings. If it is NOT possible to sample data
 * with optimal tRP/tRH settings, the parameters will be extended.
 * If clk_period is 50ns (the lowest value) this condition is met
 * for SDR timing modes 1, 2, 3, 4 and 5.
 * If clk_period is 20ns the condition is met only for SDR timing
 * mode 5.
 */
if (sdr->tRC_min <= clk_period &&
sdr->tRP_min <= (clk_period / 2) &&
sdr->tREH_min <= (clk_period / 2)) {
/* Performance mode. */
ext_rd_mode = 0;
tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
sdr->tREA_max, ext_rd_mode);
tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min,
board_delay_skew_min,
ext_rd_mode);
/*
 * Check if a data valid window and sampling point can be found
 * and that the point is not on the edge (i.e. we have hold margin).
 * If not, extend the tRP timing.
 */
if (tdvw > 0) {
if (tdvw_max <= tdvw_min ||
(tdvw_max % dqs_sampl_res) == 0) {
/*
 * No valid sampling point, so the RE pulse needs
 * to be widened by half a clock cycle.
 */
ext_rd_mode = 1;
}
} else {
/*
 * There is no valid window in which to sample the data,
 * so tRP needs to be widened.
 * Very conservative calculations are performed here.
 */
trp_cnt = (sdr->tREA_max + board_delay_skew_max
+ dqs_sampl_res) / clk_period;
ext_rd_mode = 1;
}
} else {
/* Extended read mode. */
u32 trh;
ext_rd_mode = 1;
trp_cnt = calc_cycl(sdr->tRP_min, clk_period);
trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period);
if (sdr->tREH_min >= trh)
trh_cnt = calc_cycl(sdr->tREH_min, clk_period);
else
trh_cnt = calc_cycl(trh, clk_period);
tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
sdr->tREA_max, ext_rd_mode);
/*
 * Check if a data valid window and sampling point can be found,
 * or, if the point is at the edge, check whether the previous one
 * is valid - if not, extend the tRP timing.
 */
if (tdvw > 0) {
tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
sdr->tRHOH_min,
board_delay_skew_min,
ext_rd_mode);
if ((((tdvw_max / dqs_sampl_res)
* dqs_sampl_res) <= tdvw_min) ||
(((tdvw_max % dqs_sampl_res) == 0) &&
(((tdvw_max / dqs_sampl_res - 1)
* dqs_sampl_res) <= tdvw_min))) {
/*
 * The data valid window is narrower than the
 * sampling resolution and does not hit any
 * sampling point. To make sure a sampling
 * point will be found, the RE low pulse width
 * is extended by one clock cycle.
 */
trp_cnt = trp_cnt + 1;
}
} else {
/*
 * There is no valid window in which to sample the data.
 * tRP needs to be widened.
 * Very conservative calculations are performed here.
 */
trp_cnt = (sdr->tREA_max + board_delay_skew_max
+ dqs_sampl_res) / clk_period;
}
}
tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
sdr->tRHOH_min,
board_delay_skew_min, ext_rd_mode);
if (sdr->tWC_min <= clk_period &&
(sdr->tWP_min + if_skew) <= (clk_period / 2) &&
(sdr->tWH_min + if_skew) <= (clk_period / 2)) {
ext_wr_mode = 0;
} else {
u32 twh;
ext_wr_mode = 1;
twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period);
if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew))
twp_cnt = calc_cycl(sdr->tALS_min + if_skew,
clk_period);
twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period);
if (sdr->tWH_min >= twh)
twh = sdr->tWH_min;
twh_cnt = calc_cycl(twh + if_skew, clk_period);
}
reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt);
reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt);
reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt);
reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt);
t->async_toggle_timings = reg;
dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg);
tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period);
tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period);
twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period);
trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period);
reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);
/*
 * If the timing exceeds the delay field in the timing
 * register then use the maximum value.
 */
if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt))
reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
else
reg |= TIMINGS0_TCCS;
reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
t->timings0 = reg;
dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg);
/* The following relates to a single signal, so skew is not needed. */
trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period);
trhz_cnt = trhz_cnt + 1;
twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period);
/*
 * Because of the two-stage syncflop the value must be increased:
 * the first term (3) is related to the synchronization, the
 * second (5) to the output interface delay.
 */
twb_cnt = twb_cnt + 3 + 5;
/*
 * The following is related to the WE edge of the random data input
 * sequence, so skew is not needed.
 */
tvdly_cnt = calc_cycl(500000 + if_skew, clk_period);
reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt);
reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
t->timings1 = reg;
dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg);
tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period);
if (tfeat_cnt < twb_cnt)
tfeat_cnt = twb_cnt;
tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period);
tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period);
reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
t->timings2 = reg;
dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg);
if (cdns_ctrl->caps2.is_phy_type_dll) {
reg = DLL_PHY_CTRL_DLL_RST_N;
if (ext_wr_mode)
reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE;
if (ext_rd_mode)
reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE;
reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7);
reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7);
t->dll_phy_ctrl = reg;
dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg);
}
/* Sampling point calculation. */
if ((tdvw_max % dqs_sampl_res) > 0)
sampling_point = tdvw_max / dqs_sampl_res;
else
sampling_point = (tdvw_max / dqs_sampl_res - 1);
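/*
 * Illustrative numbers only: tdvw_max = 45000 ps with
 * dqs_sampl_res = 10000 ps gives sampling_point = 4; at exactly
 * 40000 ps the point on the edge is stepped back to 3.
 */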
if (sampling_point * dqs_sampl_res > tdvw_min) {
dll_phy_dqs_timing =
FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4);
dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS;
phony_dqs_timing = sampling_point / phony_dqs_mod;
if ((sampling_point % 2) > 0) {
dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL;
if ((tdvw_max % dqs_sampl_res) == 0)
/*
 * Calculation for the case where the sampling point
 * is at the edge of the data and is an odd number.
 */
phony_dqs_timing = (tdvw_max / dqs_sampl_res)
/ phony_dqs_mod - 1;
if (!cdns_ctrl->caps2.is_phy_type_dll)
phony_dqs_timing--;
} else {
phony_dqs_timing--;
}
rd_del_sel = phony_dqs_timing + 3;
} else {
dev_warn(cdns_ctrl->dev,
"ERROR : cannot find valid sampling point\n");
}
reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing);
if (cdns_ctrl->caps2.is_phy_type_dll)
reg |= PHY_CTRL_SDR_DQS;
t->phy_ctrl = reg;
dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg);
if (cdns_ctrl->caps2.is_phy_type_dll) {
dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0);
dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2);
dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n",
dll_phy_dqs_timing);
t->phy_dqs_timing = dll_phy_dqs_timing;
reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n",
reg);
t->phy_gate_lpbk_ctrl = reg;
dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n",
PHY_DLL_MASTER_CTRL_BYPASS_MODE);
dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0);
}
return 0;
}
static int cadence_nand_attach_chip(struct nand_chip *chip)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
u32 ecc_size;
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
if (chip->options & NAND_BUSWIDTH_16) {
ret = cadence_nand_set_access_width16(cdns_ctrl, true);
if (ret)
return ret;
}
chip->bbt_options |= NAND_BBT_USE_FLASH;
chip->bbt_options |= NAND_BBT_NO_OOB;
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->options |= NAND_NO_SUBPAGE_WRITE;
cdns_chip->bbm_offs = chip->badblockpos;
cdns_chip->bbm_offs &= ~0x01;
/* This value must be an even number. */
cdns_chip->bbm_len = 2;
ret = nand_ecc_choose_conf(chip,
&cdns_ctrl->ecc_caps,
mtd->oobsize - cdns_chip->bbm_len);
if (ret) {
dev_err(cdns_ctrl->dev, "ECC configuration failed\n");
return ret;
}
dev_dbg(cdns_ctrl->dev,
"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
/* Error correction configuration. */
cdns_chip->sector_size = chip->ecc.size;
cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
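/*
 * Illustrative numbers only: writesize = 2048, oobsize = 64,
 * ecc.size = 512 and ecc.bytes = 13 give sector_count = 4,
 * ecc_size = 52 and avail_oob_size = 12 before the BCH metadata
 * clamp below.
 */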
if (cdns_chip->avail_oob_size > cdns_ctrl->bch_metadata_size)
cdns_chip->avail_oob_size = cdns_ctrl->bch_metadata_size;
if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
> mtd->oobsize)
cdns_chip->avail_oob_size -= 4;
ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength);
if (ret < 0)
return -EINVAL;
cdns_chip->corr_str_idx = (u8)ret;
if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
1000000,
CTRL_STATUS_CTRL_BUSY, true))
return -ETIMEDOUT;
cadence_nand_set_ecc_strength(cdns_ctrl,
cdns_chip->corr_str_idx);
cadence_nand_set_erase_detection(cdns_ctrl, true,
chip->ecc.strength);
/* Override the default page and OOB read/write operations. */
chip->ecc.read_page = cadence_nand_read_page;
chip->ecc.read_page_raw = cadence_nand_read_page_raw;
chip->ecc.write_page = cadence_nand_write_page;
chip->ecc.write_page_raw = cadence_nand_write_page_raw;
chip->ecc.read_oob = cadence_nand_read_oob;
chip->ecc.write_oob = cadence_nand_write_oob;
chip->ecc.read_oob_raw = cadence_nand_read_oob_raw;
chip->ecc.write_oob_raw = cadence_nand_write_oob_raw;
if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size)
cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize;
/* Is 32-bit DMA supported? */
ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(cdns_ctrl->dev, "no usable DMA configuration\n");
return ret;
}
mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops);
return 0;
}
static const struct nand_controller_ops cadence_nand_controller_ops = {
.attach_chip = cadence_nand_attach_chip,
.exec_op = cadence_nand_exec_op,
.setup_interface = cadence_nand_setup_interface,
};
static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
struct device_node *np)
{
struct cdns_nand_chip *cdns_chip;
struct mtd_info *mtd;
struct nand_chip *chip;
int nsels, ret, i;
u32 cs;
nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
if (nsels <= 0) {
dev_err(cdns_ctrl->dev, "missing/invalid reg property\n");
return -EINVAL;
}
/* Allocate the nand chip structure. */
cdns_chip = devm_kzalloc(cdns_ctrl->dev, sizeof(*cdns_chip) +
(nsels * sizeof(u8)),
GFP_KERNEL);
if (!cdns_chip) {
dev_err(cdns_ctrl->dev, "could not allocate chip structure\n");
return -ENOMEM;
}
cdns_chip->nsels = nsels;
for (i = 0; i < nsels; i++) {
/* Retrieve CS id. */
ret = of_property_read_u32_index(np, "reg", i, &cs);
if (ret) {
dev_err(cdns_ctrl->dev,
"could not retrieve reg property: %d\n",
ret);
return ret;
}
if (cs >= cdns_ctrl->caps2.max_banks) {
dev_err(cdns_ctrl->dev,
"invalid reg value: %u (max CS = %d)\n",
cs, cdns_ctrl->caps2.max_banks);
return -EINVAL;
}
if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) {
dev_err(cdns_ctrl->dev,
"CS %d already assigned\n", cs);
return -EINVAL;
}
cdns_chip->cs[i] = cs;
}
chip = &cdns_chip->chip;
chip->controller = &cdns_ctrl->controller;
nand_set_flash_node(chip, np);
mtd = nand_to_mtd(chip);
mtd->dev.parent = cdns_ctrl->dev;
/*
* Default to HW ECC engine mode. If the nand-ecc-mode property is given
* in the DT node, this entry will be overwritten in nand_scan_ident().
*/
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
ret = nand_scan(chip, cdns_chip->nsels);
if (ret) {
dev_err(cdns_ctrl->dev, "could not scan the nand chip\n");
return ret;
}
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(cdns_ctrl->dev,
"failed to register mtd device: %d\n", ret);
nand_cleanup(chip);
return ret;
}
list_add_tail(&cdns_chip->node, &cdns_ctrl->chips);
return 0;
}
static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
{
struct cdns_nand_chip *entry, *temp;
struct nand_chip *chip;
int ret;
list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
chip = &entry->chip;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
list_del(&entry->node);
}
}
static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
{
struct device_node *np = cdns_ctrl->dev->of_node;
struct device_node *nand_np;
int max_cs = cdns_ctrl->caps2.max_banks;
int nchips, ret;
nchips = of_get_child_count(np);
if (nchips > max_cs) {
dev_err(cdns_ctrl->dev,
"too many NAND chips: %d (max = %d CS)\n",
nchips, max_cs);
return -EINVAL;
}
for_each_child_of_node(np, nand_np) {
ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
if (ret) {
of_node_put(nand_np);
cadence_nand_chips_cleanup(cdns_ctrl);
return ret;
}
}
return 0;
}
static void
cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
{
/* Disable interrupts. */
writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE);
}
static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
{
dma_cap_mask_t mask;
int ret;
cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
sizeof(*cdns_ctrl->cdma_desc),
&cdns_ctrl->dma_cdma_desc,
GFP_KERNEL);
/* dma_alloc_coherent() reports failure via the returned CPU pointer. */
if (!cdns_ctrl->cdma_desc)
return -ENOMEM;
cdns_ctrl->buf_size = SZ_16K;
cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL);
if (!cdns_ctrl->buf) {
ret = -ENOMEM;
goto free_buf_desc;
}
if (devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr,
IRQF_SHARED, "cadence-nand-controller",
cdns_ctrl)) {
dev_err(cdns_ctrl->dev, "Unable to allocate IRQ\n");
ret = -ENODEV;
goto free_buf;
}
spin_lock_init(&cdns_ctrl->irq_lock);
init_completion(&cdns_ctrl->complete);
ret = cadence_nand_hw_init(cdns_ctrl);
if (ret)
goto disable_irq;
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
if (cdns_ctrl->caps1->has_dma) {
cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
if (!cdns_ctrl->dmac) {
dev_err(cdns_ctrl->dev,
"Unable to get a DMA channel\n");
ret = -EBUSY;
goto disable_irq;
}
}
nand_controller_init(&cdns_ctrl->controller);
INIT_LIST_HEAD(&cdns_ctrl->chips);
cdns_ctrl->controller.ops = &cadence_nand_controller_ops;
cdns_ctrl->curr_corr_str_idx = 0xFF;
ret = cadence_nand_chips_init(cdns_ctrl);
if (ret) {
dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
ret);
goto dma_release_chnl;
}
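/*
 * attach_chip() may have grown buf_size to fit the largest
 * page + OOB, so reallocate the bounce buffer at its final size.
 */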
kfree(cdns_ctrl->buf);
cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
if (!cdns_ctrl->buf) {
ret = -ENOMEM;
goto dma_release_chnl;
}
return 0;
dma_release_chnl:
if (cdns_ctrl->dmac)
dma_release_channel(cdns_ctrl->dmac);
disable_irq:
cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
free_buf:
kfree(cdns_ctrl->buf);
free_buf_desc:
dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
return ret;
}
/* Driver exit point. */
static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
{
cadence_nand_chips_cleanup(cdns_ctrl);
cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
kfree(cdns_ctrl->buf);
dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
if (cdns_ctrl->dmac)
dma_release_channel(cdns_ctrl->dmac);
}
struct cadence_nand_dt {
struct cdns_nand_ctrl cdns_ctrl;
struct clk *clk;
};
static const struct cadence_nand_dt_devdata cadence_nand_default = {
.if_skew = 0,
.has_dma = 1,
};
static const struct of_device_id cadence_nand_dt_ids[] = {
{
.compatible = "cdns,hp-nfc",
.data = &cadence_nand_default
}, {}
};
MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids);
static int cadence_nand_dt_probe(struct platform_device *ofdev)
{
struct resource *res;
struct cadence_nand_dt *dt;
struct cdns_nand_ctrl *cdns_ctrl;
int ret;
const struct of_device_id *of_id;
const struct cadence_nand_dt_devdata *devdata;
u32 val;
of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev);
if (of_id) {
ofdev->id_entry = of_id->data;
devdata = of_id->data;
} else {
pr_err("Failed to find the right device id.\n");
return -ENOMEM;
}
dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
if (!dt)
return -ENOMEM;
cdns_ctrl = &dt->cdns_ctrl;
cdns_ctrl->caps1 = devdata;
cdns_ctrl->dev = &ofdev->dev;
cdns_ctrl->irq = platform_get_irq(ofdev, 0);
if (cdns_ctrl->irq < 0)
return cdns_ctrl->irq;
dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq);
cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0);
if (IS_ERR(cdns_ctrl->reg))
return PTR_ERR(cdns_ctrl->reg);
cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res);
if (IS_ERR(cdns_ctrl->io.virt))
return PTR_ERR(cdns_ctrl->io.virt);
cdns_ctrl->io.dma = res->start;
dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
if (IS_ERR(dt->clk))
return PTR_ERR(dt->clk);
cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk);
ret = of_property_read_u32(ofdev->dev.of_node,
"cdns,board-delay-ps", &val);
if (ret) {
val = 4830;
dev_info(cdns_ctrl->dev,
"missing cdns,board-delay-ps property, %d was set\n",
val);
}
cdns_ctrl->board_delay = val;
ret = cadence_nand_init(cdns_ctrl);
if (ret)
return ret;
platform_set_drvdata(ofdev, dt);
return 0;
}
static void cadence_nand_dt_remove(struct platform_device *ofdev)
{
struct cadence_nand_dt *dt = platform_get_drvdata(ofdev);
cadence_nand_remove(&dt->cdns_ctrl);
}
static struct platform_driver cadence_nand_dt_driver = {
.probe = cadence_nand_dt_probe,
.remove_new = cadence_nand_dt_remove,
.driver = {
.name = "cadence-nand-controller",
.of_match_table = cadence_nand_dt_ids,
},
};
module_platform_driver(cadence_nand_dt_driver);
MODULE_AUTHOR("Piotr Sroka <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Driver for Cadence NAND flash controller");
| linux-master | drivers/mtd/nand/raw/cadence-nand-controller.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Arasan NAND Flash Controller Driver
*
* Copyright (C) 2014 - 2020 Xilinx, Inc.
* Author:
* Miquel Raynal <[email protected]>
* Original work (fully rewritten):
* Punnaiah Choudary Kalluri <[email protected]>
* Naga Sureshkumar Relli <[email protected]>
*/
#include <linux/bch.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define PKT_REG 0x00
#define PKT_SIZE(x) FIELD_PREP(GENMASK(10, 0), (x))
#define PKT_STEPS(x) FIELD_PREP(GENMASK(23, 12), (x))
#define MEM_ADDR1_REG 0x04
#define MEM_ADDR2_REG 0x08
#define ADDR2_STRENGTH(x) FIELD_PREP(GENMASK(27, 25), (x))
#define ADDR2_CS(x) FIELD_PREP(GENMASK(31, 30), (x))
#define CMD_REG 0x0C
#define CMD_1(x) FIELD_PREP(GENMASK(7, 0), (x))
#define CMD_2(x) FIELD_PREP(GENMASK(15, 8), (x))
#define CMD_PAGE_SIZE(x) FIELD_PREP(GENMASK(25, 23), (x))
#define CMD_DMA_ENABLE BIT(27)
#define CMD_NADDRS(x) FIELD_PREP(GENMASK(30, 28), (x))
#define CMD_ECC_ENABLE BIT(31)
#define PROG_REG 0x10
#define PROG_PGRD BIT(0)
#define PROG_ERASE BIT(2)
#define PROG_STATUS BIT(3)
#define PROG_PGPROG BIT(4)
#define PROG_RDID BIT(6)
#define PROG_RDPARAM BIT(7)
#define PROG_RST BIT(8)
#define PROG_GET_FEATURE BIT(9)
#define PROG_SET_FEATURE BIT(10)
#define PROG_CHG_RD_COL_ENH BIT(14)
#define INTR_STS_EN_REG 0x14
#define INTR_SIG_EN_REG 0x18
#define INTR_STS_REG 0x1C
#define WRITE_READY BIT(0)
#define READ_READY BIT(1)
#define XFER_COMPLETE BIT(2)
#define DMA_BOUNDARY BIT(6)
#define EVENT_MASK GENMASK(7, 0)
#define READY_STS_REG 0x20
#define DMA_ADDR0_REG 0x50
#define DMA_ADDR1_REG 0x24
#define FLASH_STS_REG 0x28
#define TIMING_REG 0x2C
#define TCCS_TIME_500NS 0
#define TCCS_TIME_300NS 3
#define TCCS_TIME_200NS 2
#define TCCS_TIME_100NS 1
#define FAST_TCAD BIT(2)
#define DQS_BUFF_SEL_IN(x) FIELD_PREP(GENMASK(6, 3), (x))
#define DQS_BUFF_SEL_OUT(x) FIELD_PREP(GENMASK(18, 15), (x))
#define DATA_PORT_REG 0x30
#define ECC_CONF_REG 0x34
#define ECC_CONF_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
#define ECC_CONF_LEN(x) FIELD_PREP(GENMASK(26, 16), (x))
#define ECC_CONF_BCH_EN BIT(27)
#define ECC_ERR_CNT_REG 0x38
#define GET_PKT_ERR_CNT(x) FIELD_GET(GENMASK(7, 0), (x))
#define GET_PAGE_ERR_CNT(x) FIELD_GET(GENMASK(16, 8), (x))
#define ECC_SP_REG 0x3C
#define ECC_SP_CMD1(x) FIELD_PREP(GENMASK(7, 0), (x))
#define ECC_SP_CMD2(x) FIELD_PREP(GENMASK(15, 8), (x))
#define ECC_SP_ADDRS(x) FIELD_PREP(GENMASK(30, 28), (x))
#define ECC_1ERR_CNT_REG 0x40
#define ECC_2ERR_CNT_REG 0x44
#define DATA_INTERFACE_REG 0x6C
#define DIFACE_SDR_MODE(x) FIELD_PREP(GENMASK(2, 0), (x))
#define DIFACE_DDR_MODE(x) FIELD_PREP(GENMASK(5, 3), (x))
#define DIFACE_SDR 0
#define DIFACE_NVDDR BIT(9)
#define ANFC_MAX_CS 2
#define ANFC_DFLT_TIMEOUT_US 1000000
#define ANFC_MAX_CHUNK_SIZE SZ_1M
#define ANFC_MAX_PARAM_SIZE SZ_4K
#define ANFC_MAX_STEPS SZ_2K
#define ANFC_MAX_PKT_SIZE (SZ_2K - 1)
#define ANFC_MAX_ADDR_CYC 5U
#define ANFC_RSVD_ECC_BYTES 21
#define ANFC_XLNX_SDR_DFLT_CORE_CLK 100000000
#define ANFC_XLNX_SDR_HS_CORE_CLK 80000000
static struct gpio_desc *anfc_default_cs_array[2] = {NULL, NULL};
/**
* struct anfc_op - Defines how to execute an operation
* @pkt_reg: Packet register
* @addr1_reg: Memory address 1 register
* @addr2_reg: Memory address 2 register
* @cmd_reg: Command register
* @prog_reg: Program register
* @steps: Number of "packets" to read/write
* @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
* @len: Data transfer length
* @read: Data transfer direction from the controller point of view
* @buf: Data buffer
*/
struct anfc_op {
u32 pkt_reg;
u32 addr1_reg;
u32 addr2_reg;
u32 cmd_reg;
u32 prog_reg;
int steps;
unsigned int rdy_timeout_ms;
unsigned int len;
bool read;
u8 *buf;
};
/**
* struct anand - Defines the NAND chip related information
* @node: Used to store NAND chips into a list
* @chip: NAND chip information structure
* @rb: Ready-busy line
* @page_sz: Register value of the page_sz field to use
* @clk: Expected clock frequency to use
* @data_iface: Data interface timing mode to use
* @timings: NV-DDR specific timings to use
* @ecc_conf: Hardware ECC configuration value
* @strength: Register value of the ECC strength
* @raddr_cycles: Row address cycle information
* @caddr_cycles: Column address cycle information
* @ecc_bits: Exact number of ECC bits per syndrome
* @ecc_total: Total number of ECC bytes
* @errloc: Array of errors located with soft BCH
* @hw_ecc: Buffer to store syndromes computed by hardware
* @bch: BCH structure
* @cs_idx: Array of chip-select for this device, values are indexes
* of the controller structure @gpio_cs array
* @ncs_idx: Size of the @cs_idx array
*/
struct anand {
struct list_head node;
struct nand_chip chip;
unsigned int rb;
unsigned int page_sz;
unsigned long clk;
u32 data_iface;
u32 timings;
u32 ecc_conf;
u32 strength;
u16 raddr_cycles;
u16 caddr_cycles;
unsigned int ecc_bits;
unsigned int ecc_total;
unsigned int *errloc;
u8 *hw_ecc;
struct bch_control *bch;
int *cs_idx;
int ncs_idx;
};
/**
* struct arasan_nfc - Defines the Arasan NAND flash controller driver instance
* @dev: Pointer to the device structure
* @base: Remapped register area
* @controller_clk: Pointer to the system clock
* @bus_clk: Pointer to the flash clock
* @controller: Base controller structure
* @chips: List of all NAND chips attached to the controller
* @cur_clk: Current clock rate
* @cs_array: CS array. Native CS are left empty, the other cells are
* populated with their corresponding GPIO descriptor.
* @ncs: Size of @cs_array
* @cur_cs: Index in @cs_array of the currently in use CS
* @native_cs: Currently selected native CS
* @spare_cs: Native CS that is not wired (may be selected when a GPIO
* CS is in use)
*/
struct arasan_nfc {
struct device *dev;
void __iomem *base;
struct clk *controller_clk;
struct clk *bus_clk;
struct nand_controller controller;
struct list_head chips;
unsigned int cur_clk;
struct gpio_desc **cs_array;
unsigned int ncs;
int cur_cs;
unsigned int native_cs;
unsigned int spare_cs;
};
static struct anand *to_anand(struct nand_chip *nand)
{
return container_of(nand, struct anand, chip);
}
static struct arasan_nfc *to_anfc(struct nand_controller *ctrl)
{
return container_of(ctrl, struct arasan_nfc, controller);
}
static int anfc_wait_for_event(struct arasan_nfc *nfc, unsigned int event)
{
u32 val;
int ret;
ret = readl_relaxed_poll_timeout(nfc->base + INTR_STS_REG, val,
val & event, 0,
ANFC_DFLT_TIMEOUT_US);
if (ret) {
dev_err(nfc->dev, "Timeout waiting for event 0x%x\n", event);
return -ETIMEDOUT;
}
writel_relaxed(event, nfc->base + INTR_STS_REG);
return 0;
}
static int anfc_wait_for_rb(struct arasan_nfc *nfc, struct nand_chip *chip,
unsigned int timeout_ms)
{
struct anand *anand = to_anand(chip);
u32 val;
int ret;
/* There is no R/B interrupt, we must poll a register */
ret = readl_relaxed_poll_timeout(nfc->base + READY_STS_REG, val,
val & BIT(anand->rb),
1, timeout_ms * 1000);
if (ret) {
dev_err(nfc->dev, "Timeout waiting for R/B 0x%x\n",
readl_relaxed(nfc->base + READY_STS_REG));
return -ETIMEDOUT;
}
return 0;
}
static void anfc_trigger_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
{
writel_relaxed(nfc_op->pkt_reg, nfc->base + PKT_REG);
writel_relaxed(nfc_op->addr1_reg, nfc->base + MEM_ADDR1_REG);
writel_relaxed(nfc_op->addr2_reg, nfc->base + MEM_ADDR2_REG);
writel_relaxed(nfc_op->cmd_reg, nfc->base + CMD_REG);
writel_relaxed(nfc_op->prog_reg, nfc->base + PROG_REG);
}
static int anfc_pkt_len_config(unsigned int len, unsigned int *steps,
unsigned int *pktsize)
{
unsigned int nb, sz;
for (nb = 1; nb < ANFC_MAX_STEPS; nb *= 2) {
sz = len / nb;
if (sz <= ANFC_MAX_PKT_SIZE)
break;
}
if (sz * nb != len)
return -ENOTSUPP;
if (steps)
*steps = nb;
if (pktsize)
*pktsize = sz;
return 0;
}
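/*
 * Illustrative cases for anfc_pkt_len_config(): len = 4096 with
 * ANFC_MAX_PKT_SIZE = 2047 splits into nb = 4 steps of sz = 1024
 * bytes; len = 4098 has no power-of-two split with an integral
 * packet size, so -ENOTSUPP is returned.
 */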
static bool anfc_is_gpio_cs(struct arasan_nfc *nfc, int nfc_cs)
{
return nfc_cs >= 0 && nfc->cs_array[nfc_cs];
}
static int anfc_relative_to_absolute_cs(struct anand *anand, int num)
{
return anand->cs_idx[num];
}
static void anfc_assert_cs(struct arasan_nfc *nfc, unsigned int nfc_cs_idx)
{
/* CS did not change: do nothing */
if (nfc->cur_cs == nfc_cs_idx)
return;
/* Deassert the previous CS if it was a GPIO */
if (anfc_is_gpio_cs(nfc, nfc->cur_cs))
gpiod_set_value_cansleep(nfc->cs_array[nfc->cur_cs], 1);
/* Assert the new one */
if (anfc_is_gpio_cs(nfc, nfc_cs_idx)) {
nfc->native_cs = nfc->spare_cs;
gpiod_set_value_cansleep(nfc->cs_array[nfc_cs_idx], 0);
} else {
nfc->native_cs = nfc_cs_idx;
}
nfc->cur_cs = nfc_cs_idx;
}
static int anfc_select_target(struct nand_chip *chip, int target)
{
struct anand *anand = to_anand(chip);
struct arasan_nfc *nfc = to_anfc(chip->controller);
unsigned int nfc_cs_idx = anfc_relative_to_absolute_cs(anand, target);
int ret;
anfc_assert_cs(nfc, nfc_cs_idx);
/* Update the controller timings and the potential ECC configuration */
writel_relaxed(anand->data_iface, nfc->base + DATA_INTERFACE_REG);
writel_relaxed(anand->timings, nfc->base + TIMING_REG);
/* Update clock frequency */
if (nfc->cur_clk != anand->clk) {
clk_disable_unprepare(nfc->bus_clk);
ret = clk_set_rate(nfc->bus_clk, anand->clk);
if (ret) {
dev_err(nfc->dev, "Failed to change clock rate\n");
return ret;
}
ret = clk_prepare_enable(nfc->bus_clk);
if (ret) {
dev_err(nfc->dev,
"Failed to re-enable the bus clock\n");
return ret;
}
nfc->cur_clk = anand->clk;
}
return 0;
}
/*
* When using the embedded hardware ECC engine, the controller is in charge of
* feeding the engine with, first, the ECC residue present in the data array.
* A typical read operation is:
* 1/ Assert the read operation by sending the relevant command/address cycles
* but targeting the column of the first ECC bytes in the OOB area instead of
* the main data directly.
* 2/ After having read the relevant number of ECC bytes, the controller uses
* the RNDOUT/RNDSTART commands which are set into the "ECC Spare Command
* Register" to move the pointer back at the beginning of the main data.
* 3/ It will read the content of the main area for a given size (pktsize) and
* will feed the ECC engine with this buffer again.
* 4/ The ECC engine derives the ECC bytes for the given data and compares
* them with the ones already received. It eventually triggers status flags
* and then sets the "Buffer Read Ready" flag.
* 5/ The corrected data is then available for reading from the data port
* register.
*
* The hardware BCH ECC engine is known to be inconsistent in BCH mode and
* never reports uncorrectable errors. Because of this bug, we have to use
* the software BCH implementation in the read path.
*/
static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct arasan_nfc *nfc = to_anfc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
struct anand *anand = to_anand(chip);
unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
unsigned int max_bitflips = 0;
dma_addr_t dma_addr;
int step, ret;
struct anfc_op nfc_op = {
.pkt_reg =
PKT_SIZE(chip->ecc.size) |
PKT_STEPS(chip->ecc.steps),
.addr1_reg =
(page & 0xFF) << (8 * (anand->caddr_cycles)) |
(((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
.addr2_reg =
((page >> 16) & 0xFF) |
ADDR2_STRENGTH(anand->strength) |
ADDR2_CS(nfc->native_cs),
.cmd_reg =
CMD_1(NAND_CMD_READ0) |
CMD_2(NAND_CMD_READSTART) |
CMD_PAGE_SIZE(anand->page_sz) |
CMD_DMA_ENABLE |
CMD_NADDRS(anand->caddr_cycles +
anand->raddr_cycles),
.prog_reg = PROG_PGRD,
};
dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_FROM_DEVICE);
if (dma_mapping_error(nfc->dev, dma_addr)) {
dev_err(nfc->dev, "Buffer mapping error");
return -EIO;
}
writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);
anfc_trigger_op(nfc, &nfc_op);
ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
dma_unmap_single(nfc->dev, dma_addr, len, DMA_FROM_DEVICE);
if (ret) {
dev_err(nfc->dev, "Error reading page %d\n", page);
return ret;
}
/* Store the raw OOB bytes as well */
ret = nand_change_read_column_op(chip, mtd->writesize, chip->oob_poi,
mtd->oobsize, 0);
if (ret)
return ret;
/*
 * For each step, compute the BCH syndrome over the raw data in
 * software, count the errors, and compare the result with the
 * hardware engine's feedback.
 */
for (step = 0; step < chip->ecc.steps; step++) {
u8 *raw_buf = &buf[step * chip->ecc.size];
unsigned int bit, byte;
int bf, i;
/* Extract the syndrome, it is not necessarily aligned */
memset(anand->hw_ecc, 0, chip->ecc.bytes);
nand_extract_bits(anand->hw_ecc, 0,
&chip->oob_poi[mtd->oobsize - anand->ecc_total],
anand->ecc_bits * step, anand->ecc_bits);
bf = bch_decode(anand->bch, raw_buf, chip->ecc.size,
anand->hw_ecc, NULL, NULL, anand->errloc);
if (!bf) {
continue;
} else if (bf > 0) {
for (i = 0; i < bf; i++) {
/* Only correct the data, not the syndrome */
if (anand->errloc[i] < (chip->ecc.size * 8)) {
bit = BIT(anand->errloc[i] & 7);
byte = anand->errloc[i] >> 3;
raw_buf[byte] ^= bit;
}
}
mtd->ecc_stats.corrected += bf;
max_bitflips = max_t(unsigned int, max_bitflips, bf);
continue;
}
bf = nand_check_erased_ecc_chunk(raw_buf, chip->ecc.size,
NULL, 0, NULL, 0,
chip->ecc.strength);
if (bf > 0) {
mtd->ecc_stats.corrected += bf;
max_bitflips = max_t(unsigned int, max_bitflips, bf);
memset(raw_buf, 0xFF, chip->ecc.size);
} else if (bf < 0) {
mtd->ecc_stats.failed++;
}
}
/* Propagate the maximum number of bitflips detected in the page. */
return max_bitflips;
}
static int anfc_sel_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
int ret;
ret = anfc_select_target(chip, chip->cur_cs);
if (ret)
return ret;
return anfc_read_page_hw_ecc(chip, buf, oob_required, page);
};
static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
struct anand *anand = to_anand(chip);
struct arasan_nfc *nfc = to_anfc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
dma_addr_t dma_addr;
int ret;
struct anfc_op nfc_op = {
.pkt_reg =
PKT_SIZE(chip->ecc.size) |
PKT_STEPS(chip->ecc.steps),
.addr1_reg =
(page & 0xFF) << (8 * (anand->caddr_cycles)) |
(((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
.addr2_reg =
((page >> 16) & 0xFF) |
ADDR2_STRENGTH(anand->strength) |
ADDR2_CS(nfc->native_cs),
.cmd_reg =
CMD_1(NAND_CMD_SEQIN) |
CMD_2(NAND_CMD_PAGEPROG) |
CMD_PAGE_SIZE(anand->page_sz) |
CMD_DMA_ENABLE |
CMD_NADDRS(anand->caddr_cycles +
anand->raddr_cycles) |
CMD_ECC_ENABLE,
.prog_reg = PROG_PGPROG,
};
writel_relaxed(anand->ecc_conf, nfc->base + ECC_CONF_REG);
writel_relaxed(ECC_SP_CMD1(NAND_CMD_RNDIN) |
ECC_SP_ADDRS(anand->caddr_cycles),
nfc->base + ECC_SP_REG);
dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(nfc->dev, dma_addr)) {
dev_err(nfc->dev, "Buffer mapping error");
return -EIO;
}
writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);
anfc_trigger_op(nfc, &nfc_op);
ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
dma_unmap_single(nfc->dev, dma_addr, len, DMA_TO_DEVICE);
if (ret) {
dev_err(nfc->dev, "Error writing page %d\n", page);
return ret;
}
/* Spare data is not protected */
if (oob_required)
ret = nand_write_oob_std(chip, page);
return ret;
}
static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
int ret;
ret = anfc_select_target(chip, chip->cur_cs);
if (ret)
return ret;
return anfc_write_page_hw_ecc(chip, buf, oob_required, page);
};
/* NAND framework ->exec_op() hooks and related helpers */
static int anfc_parse_instructions(struct nand_chip *chip,
const struct nand_subop *subop,
struct anfc_op *nfc_op)
{
struct arasan_nfc *nfc = to_anfc(chip->controller);
struct anand *anand = to_anand(chip);
const struct nand_op_instr *instr = NULL;
bool first_cmd = true;
unsigned int op_id;
int ret, i;
memset(nfc_op, 0, sizeof(*nfc_op));
nfc_op->addr2_reg = ADDR2_CS(nfc->native_cs);
nfc_op->cmd_reg = CMD_PAGE_SIZE(anand->page_sz);
for (op_id = 0; op_id < subop->ninstrs; op_id++) {
unsigned int offset, naddrs, pktsize;
const u8 *addrs;
u8 *buf;
instr = &subop->instrs[op_id];
switch (instr->type) {
case NAND_OP_CMD_INSTR:
if (first_cmd)
nfc_op->cmd_reg |= CMD_1(instr->ctx.cmd.opcode);
else
nfc_op->cmd_reg |= CMD_2(instr->ctx.cmd.opcode);
first_cmd = false;
break;
case NAND_OP_ADDR_INSTR:
offset = nand_subop_get_addr_start_off(subop, op_id);
naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
addrs = &instr->ctx.addr.addrs[offset];
nfc_op->cmd_reg |= CMD_NADDRS(naddrs);
for (i = 0; i < min(ANFC_MAX_ADDR_CYC, naddrs); i++) {
if (i < 4)
nfc_op->addr1_reg |= (u32)addrs[i] << i * 8;
else
nfc_op->addr2_reg |= addrs[i];
}
break;
case NAND_OP_DATA_IN_INSTR:
nfc_op->read = true;
fallthrough;
case NAND_OP_DATA_OUT_INSTR:
offset = nand_subop_get_data_start_off(subop, op_id);
buf = instr->ctx.data.buf.in;
nfc_op->buf = &buf[offset];
nfc_op->len = nand_subop_get_data_len(subop, op_id);
ret = anfc_pkt_len_config(nfc_op->len, &nfc_op->steps,
&pktsize);
if (ret)
return ret;
/*
 * The number of DATA cycles must be aligned on 4, which
 * means the controller might read/write more than
 * requested. This is harmless most of the time as extra
 * DATA cycles are discarded in the write path and the
 * read pointer is adjusted in the read path.
 *
 * FIXME: The core should mark operations where
 * reading/writing more is allowed so the exec_op()
 * implementation can take the right decision when the
 * alignment constraint is not met: adjust the number of
 * DATA cycles when it's allowed, reject the operation
 * otherwise.
 */
nfc_op->pkt_reg |= PKT_SIZE(round_up(pktsize, 4)) |
PKT_STEPS(nfc_op->steps);
break;
case NAND_OP_WAITRDY_INSTR:
nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
break;
}
}
return 0;
}
static int anfc_rw_pio_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
{
unsigned int dwords = (nfc_op->len / 4) / nfc_op->steps;
unsigned int last_len = nfc_op->len % 4;
unsigned int offset, dir;
u8 *buf = nfc_op->buf;
int ret, i;
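/*
 * Illustrative split: len = 520 in a single step gives 130 dwords
 * and last_len = 0, while len = 6 gives one dword plus a 2-byte
 * tail handled after the loop.
 */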
for (i = 0; i < nfc_op->steps; i++) {
dir = nfc_op->read ? READ_READY : WRITE_READY;
ret = anfc_wait_for_event(nfc, dir);
if (ret) {
dev_err(nfc->dev, "PIO %s ready signal not received\n",
nfc_op->read ? "Read" : "Write");
return ret;
}
offset = i * (dwords * 4);
if (nfc_op->read)
ioread32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
dwords);
else
iowrite32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
dwords);
}
if (last_len) {
u32 remainder;
offset = nfc_op->len - last_len;
if (nfc_op->read) {
remainder = readl_relaxed(nfc->base + DATA_PORT_REG);
memcpy(&buf[offset], &remainder, last_len);
} else {
memcpy(&remainder, &buf[offset], last_len);
writel_relaxed(remainder, nfc->base + DATA_PORT_REG);
}
}
return anfc_wait_for_event(nfc, XFER_COMPLETE);
}
static int anfc_misc_data_type_exec(struct nand_chip *chip,
const struct nand_subop *subop,
u32 prog_reg)
{
struct arasan_nfc *nfc = to_anfc(chip->controller);
struct anfc_op nfc_op = {};
int ret;
ret = anfc_parse_instructions(chip, subop, &nfc_op);
if (ret)
return ret;
nfc_op.prog_reg = prog_reg;
anfc_trigger_op(nfc, &nfc_op);
if (nfc_op.rdy_timeout_ms) {
ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
if (ret)
return ret;
}
return anfc_rw_pio_op(nfc, &nfc_op);
}
static int anfc_param_read_type_exec(struct nand_chip *chip,
const struct nand_subop *subop)
{
return anfc_misc_data_type_exec(chip, subop, PROG_RDPARAM);
}
static int anfc_data_read_type_exec(struct nand_chip *chip,
const struct nand_subop *subop)
{
u32 prog_reg = PROG_PGRD;
/*
* Experience shows that, while in SDR mode, sending a CHANGE READ COLUMN
* command through the READ PAGE "type" always works fine, whereas in
* NV-DDR mode the same command simply fails. However, it was also
* spotted that any CHANGE READ COLUMN command sent through the CHANGE
* READ COLUMN ENHANCED "type" would correctly work in both cases (SDR
* and NV-DDR). So, for simplicity, let's program the controller with
* the CHANGE READ COLUMN ENHANCED "type" whenever we are requested to
* perform a CHANGE READ COLUMN operation.
*/
if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_RNDOUT &&
subop->instrs[2].ctx.cmd.opcode == NAND_CMD_RNDOUTSTART)
prog_reg = PROG_CHG_RD_COL_ENH;
return anfc_misc_data_type_exec(chip, subop, prog_reg);
}
static int anfc_param_write_type_exec(struct nand_chip *chip,
const struct nand_subop *subop)
{
return anfc_misc_data_type_exec(chip, subop, PROG_SET_FEATURE);
}
static int anfc_data_write_type_exec(struct nand_chip *chip,
const struct nand_subop *subop)
{
return anfc_misc_data_type_exec(chip, subop, PROG_PGPROG);
}
static int anfc_misc_zerolen_type_exec(struct nand_chip *chip,
const struct nand_subop *subop,
u32 prog_reg)
{
struct arasan_nfc *nfc = to_anfc(chip->controller);
struct anfc_op nfc_op = {};
int ret;
ret = anfc_parse_instructions(chip, subop, &nfc_op);
if (ret)
return ret;
nfc_op.prog_reg = prog_reg;
anfc_trigger_op(nfc, &nfc_op);
ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
if (ret)
return ret;
if (nfc_op.rdy_timeout_ms)
ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
return ret;
}
static int anfc_status_type_exec(struct nand_chip *chip,
const struct nand_subop *subop)
{
struct arasan_nfc *nfc = to_anfc(chip->controller);
u32 tmp;
int ret;
/* See anfc_check_op() for details about this constraint */
if (subop->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS)
return -ENOTSUPP;
ret = anfc_misc_zerolen_type_exec(chip, subop, PROG_STATUS);
if (ret)
return ret;
tmp = readl_relaxed(nfc->base + FLASH_STS_REG);
memcpy(subop->instrs[1].ctx.data.buf.in, &tmp, 1);
return 0;
}
static int anfc_reset_type_exec(struct nand_chip *chip,
const struct nand_subop *subop)
{
return anfc_misc_zerolen_type_exec(chip, subop, PROG_RST);
}
static int anfc_erase_type_exec(struct nand_chip *chip,
const struct nand_subop *subop)
{
return anfc_misc_zerolen_type_exec(chip, subop, PROG_ERASE);
}
static int anfc_wait_type_exec(struct nand_chip *chip,
const struct nand_subop *subop)
{
struct arasan_nfc *nfc = to_anfc(chip->controller);
struct anfc_op nfc_op = {};
int ret;
ret = anfc_parse_instructions(chip, subop, &nfc_op);
if (ret)
return ret;
return anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
}
static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER(
NAND_OP_PARSER_PATTERN(
anfc_param_read_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
NAND_OP_PARSER_PATTERN(
anfc_param_write_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_PARAM_SIZE)),
NAND_OP_PARSER_PATTERN(
anfc_data_read_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, ANFC_MAX_CHUNK_SIZE)),
NAND_OP_PARSER_PATTERN(
anfc_data_write_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE),
NAND_OP_PARSER_PAT_CMD_ELEM(false)),
NAND_OP_PARSER_PATTERN(
anfc_reset_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
NAND_OP_PARSER_PATTERN(
anfc_erase_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
NAND_OP_PARSER_PATTERN(
anfc_status_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
NAND_OP_PARSER_PATTERN(
anfc_wait_type_exec,
NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
);
static int anfc_check_op(struct nand_chip *chip,
const struct nand_operation *op)
{
const struct nand_op_instr *instr;
int op_id;
/*
* The controller abstracts all the NAND operations and does not
* support data-only operations.
*
* TODO: The nand_op_parser framework should be extended to
* support custom checks on DATA instructions.
*/
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];
switch (instr->type) {
case NAND_OP_ADDR_INSTR:
if (instr->ctx.addr.naddrs > ANFC_MAX_ADDR_CYC)
return -ENOTSUPP;
break;
case NAND_OP_DATA_IN_INSTR:
case NAND_OP_DATA_OUT_INSTR:
if (instr->ctx.data.len > ANFC_MAX_CHUNK_SIZE)
return -ENOTSUPP;
if (anfc_pkt_len_config(instr->ctx.data.len, NULL, NULL))
return -ENOTSUPP;
break;
default:
break;
}
}
/*
* The controller does not allow proceeding with a CMD+DATA_IN cycle
* manually on the bus by reading data from the data register. Instead,
* it abstracts a status read operation with its own status
* register after ordering a read status operation. Hence, we cannot
* support any CMD+DATA_IN operation other than a READ STATUS.
*
* TODO: The nand_op_parser() framework should be extended to describe
* fixed patterns instead of open-coding this check here.
*/
if (op->ninstrs == 2 &&
op->instrs[0].type == NAND_OP_CMD_INSTR &&
op->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS &&
op->instrs[1].type == NAND_OP_DATA_IN_INSTR)
return -ENOTSUPP;
return nand_op_parser_exec_op(chip, &anfc_op_parser, op, true);
}
static int anfc_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
int ret;
if (check_only)
return anfc_check_op(chip, op);
ret = anfc_select_target(chip, op->cs);
if (ret)
return ret;
return nand_op_parser_exec_op(chip, &anfc_op_parser, op, check_only);
}
static int anfc_setup_interface(struct nand_chip *chip, int target,
const struct nand_interface_config *conf)
{
struct anand *anand = to_anand(chip);
struct arasan_nfc *nfc = to_anfc(chip->controller);
struct device_node *np = nfc->dev->of_node;
const struct nand_sdr_timings *sdr;
const struct nand_nvddr_timings *nvddr;
unsigned int tccs_min, dqs_mode, fast_tcad;
if (nand_interface_is_nvddr(conf)) {
nvddr = nand_get_nvddr_timings(conf);
if (IS_ERR(nvddr))
return PTR_ERR(nvddr);
} else {
sdr = nand_get_sdr_timings(conf);
if (IS_ERR(sdr))
return PTR_ERR(sdr);
}
if (target < 0)
return 0;
if (nand_interface_is_sdr(conf)) {
anand->data_iface = DIFACE_SDR |
DIFACE_SDR_MODE(conf->timings.mode);
anand->timings = 0;
} else {
anand->data_iface = DIFACE_NVDDR |
DIFACE_DDR_MODE(conf->timings.mode);
if (conf->timings.nvddr.tCCS_min <= 100000)
tccs_min = TCCS_TIME_100NS;
else if (conf->timings.nvddr.tCCS_min <= 200000)
tccs_min = TCCS_TIME_200NS;
else if (conf->timings.nvddr.tCCS_min <= 300000)
tccs_min = TCCS_TIME_300NS;
else
tccs_min = TCCS_TIME_500NS;
fast_tcad = 0;
if (conf->timings.nvddr.tCAD_min < 45000)
fast_tcad = FAST_TCAD;
switch (conf->timings.mode) {
case 5:
case 4:
dqs_mode = 2;
break;
case 3:
dqs_mode = 3;
break;
case 2:
dqs_mode = 4;
break;
case 1:
dqs_mode = 5;
break;
case 0:
default:
dqs_mode = 6;
break;
}
anand->timings = tccs_min | fast_tcad |
DQS_BUFF_SEL_IN(dqs_mode) |
DQS_BUFF_SEL_OUT(dqs_mode);
}
if (nand_interface_is_sdr(conf)) {
anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
} else {
/* ONFI timings are defined in picoseconds */
anand->clk = div_u64((u64)NSEC_PER_SEC * 1000,
conf->timings.nvddr.tCK_min);
}
/*
* Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work
* with f > 90MHz (default clock is 100MHz) but signals are unstable
* with higher modes. Hence we slightly lower the clock rate to 80MHz
* when using SDR modes 2-5 with this SoC.
*/
if (of_device_is_compatible(np, "xlnx,zynqmp-nand-controller") &&
nand_interface_is_sdr(conf) && conf->timings.mode >= 2)
anand->clk = ANFC_XLNX_SDR_HS_CORE_CLK;
return 0;
}
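/*
* Number of ECC bytes per step for the hardware BCH engine. Worked
* example (plain arithmetic): a 512-byte step uses GF(2^13), so a
* strength of 8 needs 13 * 8 = 104 parity bits, i.e.
* DIV_ROUND_UP(104, 8) = 13 bytes per step.
*/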
static int anfc_calc_hw_ecc_bytes(int step_size, int strength)
{
unsigned int bch_gf_mag, ecc_bits;
switch (step_size) {
case SZ_512:
bch_gf_mag = 13;
break;
case SZ_1K:
bch_gf_mag = 14;
break;
default:
return -EINVAL;
}
ecc_bits = bch_gf_mag * strength;
return DIV_ROUND_UP(ecc_bits, 8);
}
static const int anfc_hw_ecc_512_strengths[] = {4, 8, 12};
static const int anfc_hw_ecc_1024_strengths[] = {24};
static const struct nand_ecc_step_info anfc_hw_ecc_step_infos[] = {
{
.stepsize = SZ_512,
.strengths = anfc_hw_ecc_512_strengths,
.nstrengths = ARRAY_SIZE(anfc_hw_ecc_512_strengths),
},
{
.stepsize = SZ_1K,
.strengths = anfc_hw_ecc_1024_strengths,
.nstrengths = ARRAY_SIZE(anfc_hw_ecc_1024_strengths),
},
};
static const struct nand_ecc_caps anfc_hw_ecc_caps = {
.stepinfos = anfc_hw_ecc_step_infos,
.nstepinfos = ARRAY_SIZE(anfc_hw_ecc_step_infos),
.calc_ecc_bytes = anfc_calc_hw_ecc_bytes,
};
static int anfc_init_hw_ecc_controller(struct arasan_nfc *nfc,
struct nand_chip *chip)
{
struct anand *anand = to_anand(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
unsigned int bch_prim_poly = 0, bch_gf_mag = 0, ecc_offset;
int ret;
switch (mtd->writesize) {
case SZ_512:
case SZ_2K:
case SZ_4K:
case SZ_8K:
case SZ_16K:
break;
default:
dev_err(nfc->dev, "Unsupported page size %d\n", mtd->writesize);
return -EINVAL;
}
ret = nand_ecc_choose_conf(chip, &anfc_hw_ecc_caps, mtd->oobsize);
if (ret)
return ret;
switch (ecc->strength) {
case 12:
anand->strength = 0x1;
break;
case 8:
anand->strength = 0x2;
break;
case 4:
anand->strength = 0x3;
break;
case 24:
anand->strength = 0x4;
break;
default:
dev_err(nfc->dev, "Unsupported strength %d\n", ecc->strength);
return -EINVAL;
}
switch (ecc->size) {
case SZ_512:
bch_gf_mag = 13;
bch_prim_poly = 0x201b;
break;
case SZ_1K:
bch_gf_mag = 14;
bch_prim_poly = 0x4443;
break;
default:
dev_err(nfc->dev, "Unsupported step size %d\n", ecc->strength);
return -EINVAL;
}
mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
ecc->steps = mtd->writesize / ecc->size;
ecc->algo = NAND_ECC_ALGO_BCH;
anand->ecc_bits = bch_gf_mag * ecc->strength;
ecc->bytes = DIV_ROUND_UP(anand->ecc_bits, 8);
anand->ecc_total = DIV_ROUND_UP(anand->ecc_bits * ecc->steps, 8);
ecc_offset = mtd->writesize + mtd->oobsize - anand->ecc_total;
anand->ecc_conf = ECC_CONF_COL(ecc_offset) |
ECC_CONF_LEN(anand->ecc_total) |
ECC_CONF_BCH_EN;
anand->errloc = devm_kmalloc_array(nfc->dev, ecc->strength,
sizeof(*anand->errloc), GFP_KERNEL);
if (!anand->errloc)
return -ENOMEM;
anand->hw_ecc = devm_kmalloc(nfc->dev, ecc->bytes, GFP_KERNEL);
if (!anand->hw_ecc)
return -ENOMEM;
/* Enforce bit swapping to fit the hardware */
anand->bch = bch_init(bch_gf_mag, ecc->strength, bch_prim_poly, true);
if (!anand->bch)
return -EINVAL;
ecc->read_page = anfc_sel_read_page_hw_ecc;
ecc->write_page = anfc_sel_write_page_hw_ecc;
return 0;
}
static int anfc_attach_chip(struct nand_chip *chip)
{
struct anand *anand = to_anand(chip);
struct arasan_nfc *nfc = to_anfc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
int ret = 0;
if (mtd->writesize <= SZ_512)
anand->caddr_cycles = 1;
else
anand->caddr_cycles = 2;
if (chip->options & NAND_ROW_ADDR_3)
anand->raddr_cycles = 3;
else
anand->raddr_cycles = 2;
switch (mtd->writesize) {
case 512:
anand->page_sz = 0;
break;
case 1024:
anand->page_sz = 5;
break;
case 2048:
anand->page_sz = 1;
break;
case 4096:
anand->page_sz = 2;
break;
case 8192:
anand->page_sz = 3;
break;
case 16384:
anand->page_sz = 4;
break;
default:
return -EINVAL;
}
/* These hooks are valid for all ECC providers */
chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
switch (chip->ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_NONE:
case NAND_ECC_ENGINE_TYPE_SOFT:
case NAND_ECC_ENGINE_TYPE_ON_DIE:
break;
case NAND_ECC_ENGINE_TYPE_ON_HOST:
ret = anfc_init_hw_ecc_controller(nfc, chip);
break;
default:
dev_err(nfc->dev, "Unsupported ECC mode: %d\n",
chip->ecc.engine_type);
return -EINVAL;
}
return ret;
}
static void anfc_detach_chip(struct nand_chip *chip)
{
struct anand *anand = to_anand(chip);
if (anand->bch)
bch_free(anand->bch);
}
static const struct nand_controller_ops anfc_ops = {
.exec_op = anfc_exec_op,
.setup_interface = anfc_setup_interface,
.attach_chip = anfc_attach_chip,
.detach_chip = anfc_detach_chip,
};
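/*
* Per-chip initialization. As read from the code below: the device
* tree "reg" property lists the native CS line(s) the chip is wired
* to, "nand-rb" gives the ready/busy line index, and the MTD name
* comes from the mandatory "label" property filled in by
* nand_set_flash_node().
*/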
static int anfc_chip_init(struct arasan_nfc *nfc, struct device_node *np)
{
struct anand *anand;
struct nand_chip *chip;
struct mtd_info *mtd;
int rb, ret, i;
anand = devm_kzalloc(nfc->dev, sizeof(*anand), GFP_KERNEL);
if (!anand)
return -ENOMEM;
/* Chip-select init */
anand->ncs_idx = of_property_count_elems_of_size(np, "reg", sizeof(u32));
if (anand->ncs_idx <= 0 || anand->ncs_idx > nfc->ncs) {
dev_err(nfc->dev, "Invalid reg property\n");
return -EINVAL;
}
anand->cs_idx = devm_kcalloc(nfc->dev, anand->ncs_idx,
sizeof(*anand->cs_idx), GFP_KERNEL);
if (!anand->cs_idx)
return -ENOMEM;
for (i = 0; i < anand->ncs_idx; i++) {
ret = of_property_read_u32_index(np, "reg", i,
&anand->cs_idx[i]);
if (ret) {
dev_err(nfc->dev, "invalid CS property: %d\n", ret);
return ret;
}
}
/* Ready-busy init */
ret = of_property_read_u32(np, "nand-rb", &rb);
if (ret)
return ret;
if (rb >= ANFC_MAX_CS) {
dev_err(nfc->dev, "Wrong RB %d\n", rb);
return -EINVAL;
}
anand->rb = rb;
chip = &anand->chip;
mtd = nand_to_mtd(chip);
mtd->dev.parent = nfc->dev;
chip->controller = &nfc->controller;
chip->options = NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
NAND_USES_DMA;
nand_set_flash_node(chip, np);
if (!mtd->name) {
dev_err(nfc->dev, "NAND label property is mandatory\n");
return -EINVAL;
}
ret = nand_scan(chip, anand->ncs_idx);
if (ret) {
dev_err(nfc->dev, "Scan operation failed\n");
return ret;
}
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
nand_cleanup(chip);
return ret;
}
list_add_tail(&anand->node, &nfc->chips);
return 0;
}
static void anfc_chips_cleanup(struct arasan_nfc *nfc)
{
struct anand *anand, *tmp;
struct nand_chip *chip;
int ret;
list_for_each_entry_safe(anand, tmp, &nfc->chips, node) {
chip = &anand->chip;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
list_del(&anand->node);
}
}
static int anfc_chips_init(struct arasan_nfc *nfc)
{
struct device_node *np = nfc->dev->of_node, *nand_np;
int nchips = of_get_child_count(np);
int ret;
if (!nchips) {
dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n",
nchips);
return -EINVAL;
}
for_each_child_of_node(np, nand_np) {
ret = anfc_chip_init(nfc, nand_np);
if (ret) {
of_node_put(nand_np);
anfc_chips_cleanup(nfc);
break;
}
}
return ret;
}
static void anfc_reset(struct arasan_nfc *nfc)
{
/* Disable interrupt signals */
writel_relaxed(0, nfc->base + INTR_SIG_EN_REG);
/* Enable interrupt status */
writel_relaxed(EVENT_MASK, nfc->base + INTR_STS_EN_REG);
nfc->cur_cs = -1;
}
static int anfc_parse_cs(struct arasan_nfc *nfc)
{
int ret;
/* Check the gpio-cs property */
ret = rawnand_dt_parse_gpio_cs(nfc->dev, &nfc->cs_array, &nfc->ncs);
if (ret)
return ret;
/*
* The two controller native CS lines cannot both be disabled at the
* same time. Hence, only one native CS can be used if GPIO CS are
* needed: the other one is selected whenever a non-native CS must be
* asserted (a CS not wired physically or configured as a GPIO instead
* of a NAND CS). In this case, the native CS that was not chosen is
* assigned to nfc->spare_cs and selected whenever a GPIO CS must be
* asserted.
*/
if (nfc->cs_array && nfc->ncs > 2) {
if (!nfc->cs_array[0] && !nfc->cs_array[1]) {
dev_err(nfc->dev,
"Assign a single native CS when using GPIOs\n");
return -EINVAL;
}
if (nfc->cs_array[0])
nfc->spare_cs = 0;
else
nfc->spare_cs = 1;
}
if (!nfc->cs_array) {
nfc->cs_array = anfc_default_cs_array;
nfc->ncs = ANFC_MAX_CS;
return 0;
}
return 0;
}
static int anfc_probe(struct platform_device *pdev)
{
struct arasan_nfc *nfc;
int ret;
nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
if (!nfc)
return -ENOMEM;
nfc->dev = &pdev->dev;
nand_controller_init(&nfc->controller);
nfc->controller.ops = &anfc_ops;
INIT_LIST_HEAD(&nfc->chips);
nfc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nfc->base))
return PTR_ERR(nfc->base);
anfc_reset(nfc);
nfc->controller_clk = devm_clk_get_enabled(&pdev->dev, "controller");
if (IS_ERR(nfc->controller_clk))
return PTR_ERR(nfc->controller_clk);
nfc->bus_clk = devm_clk_get_enabled(&pdev->dev, "bus");
if (IS_ERR(nfc->bus_clk))
return PTR_ERR(nfc->bus_clk);
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
return ret;
ret = anfc_parse_cs(nfc);
if (ret)
return ret;
ret = anfc_chips_init(nfc);
if (ret)
return ret;
platform_set_drvdata(pdev, nfc);
return 0;
}
static void anfc_remove(struct platform_device *pdev)
{
struct arasan_nfc *nfc = platform_get_drvdata(pdev);
anfc_chips_cleanup(nfc);
}
static const struct of_device_id anfc_ids[] = {
{
.compatible = "xlnx,zynqmp-nand-controller",
},
{
.compatible = "arasan,nfc-v3p10",
},
{}
};
MODULE_DEVICE_TABLE(of, anfc_ids);
static struct platform_driver anfc_driver = {
.driver = {
.name = "arasan-nand-controller",
.of_match_table = anfc_ids,
},
.probe = anfc_probe,
.remove_new = anfc_remove,
};
module_platform_driver(anfc_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Punnaiah Choudary Kalluri <[email protected]>");
MODULE_AUTHOR("Naga Sureshkumar Relli <[email protected]>");
MODULE_AUTHOR("Miquel Raynal <[email protected]>");
MODULE_DESCRIPTION("Arasan NAND Flash Controller Driver");
| linux-master | drivers/mtd/nand/raw/arasan-nand-controller.c |
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2020 Intel Corporation. */
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/units.h>
#include <asm/unaligned.h>
#define EBU_CLC 0x000
#define EBU_CLC_RST 0x00000000u
#define EBU_ADDR_SEL(n) (0x020 + (n) * 4)
/* 5 bits 26:22 included for comparison in the ADDR_SELx */
#define EBU_ADDR_MASK(x) ((x) << 4)
#define EBU_ADDR_SEL_REGEN 0x1
#define EBU_BUSCON(n) (0x060 + (n) * 4)
#define EBU_BUSCON_CMULT_V4 0x1
#define EBU_BUSCON_RECOVC(n) ((n) << 2)
#define EBU_BUSCON_HOLDC(n) ((n) << 4)
#define EBU_BUSCON_WAITRDC(n) ((n) << 6)
#define EBU_BUSCON_WAITWRC(n) ((n) << 8)
#define EBU_BUSCON_BCGEN_CS 0x0
#define EBU_BUSCON_SETUP_EN BIT(22)
#define EBU_BUSCON_ALEC 0xC000
#define EBU_CON 0x0B0
#define EBU_CON_NANDM_EN BIT(0)
#define EBU_CON_NANDM_DIS 0x0
#define EBU_CON_CSMUX_E_EN BIT(1)
#define EBU_CON_ALE_P_LOW BIT(2)
#define EBU_CON_CLE_P_LOW BIT(3)
#define EBU_CON_CS_P_LOW BIT(4)
#define EBU_CON_SE_P_LOW BIT(5)
#define EBU_CON_WP_P_LOW BIT(6)
#define EBU_CON_PRE_P_LOW BIT(7)
#define EBU_CON_IN_CS_S(n) ((n) << 8)
#define EBU_CON_OUT_CS_S(n) ((n) << 10)
#define EBU_CON_LAT_EN_CS_P ((0x3D) << 18)
#define EBU_WAIT 0x0B4
#define EBU_WAIT_RDBY BIT(0)
#define EBU_WAIT_WR_C BIT(3)
#define HSNAND_CTL1 0x110
#define HSNAND_CTL1_ADDR_SHIFT 24
#define HSNAND_CTL2 0x114
#define HSNAND_CTL2_ADDR_SHIFT 8
#define HSNAND_CTL2_CYC_N_V5 (0x2 << 16)
#define HSNAND_INT_MSK_CTL 0x124
#define HSNAND_INT_MSK_CTL_WR_C BIT(4)
#define HSNAND_INT_STA 0x128
#define HSNAND_INT_STA_WR_C BIT(4)
#define HSNAND_CTL 0x130
#define HSNAND_CTL_ENABLE_ECC BIT(0)
#define HSNAND_CTL_GO BIT(2)
#define HSNAND_CTL_CE_SEL_CS(n) BIT(3 + (n))
#define HSNAND_CTL_RW_READ 0x0
#define HSNAND_CTL_RW_WRITE BIT(10)
#define HSNAND_CTL_ECC_OFF_V8TH BIT(11)
#define HSNAND_CTL_CKFF_EN 0x0
#define HSNAND_CTL_MSG_EN BIT(17)
#define HSNAND_PARA0 0x13c
#define HSNAND_PARA0_PAGE_V8192 0x3
#define HSNAND_PARA0_PIB_V256 (0x3 << 4)
#define HSNAND_PARA0_BYP_EN_NP 0x0
#define HSNAND_PARA0_BYP_DEC_NP 0x0
#define HSNAND_PARA0_TYPE_ONFI BIT(18)
#define HSNAND_PARA0_ADEP_EN BIT(21)
#define HSNAND_CMSG_0 0x150
#define HSNAND_CMSG_1 0x154
#define HSNAND_ALE_OFFS BIT(2)
#define HSNAND_CLE_OFFS BIT(3)
#define HSNAND_CS_OFFS BIT(4)
#define HSNAND_ECC_OFFSET 0x008
#define MAX_CS 2
#define USEC_PER_SEC 1000000L
struct ebu_nand_cs {
void __iomem *chipaddr;
u32 addr_sel;
};
struct ebu_nand_controller {
struct nand_controller controller;
struct nand_chip chip;
struct device *dev;
void __iomem *ebu;
void __iomem *hsnand;
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
struct completion dma_access_complete;
struct clk *clk;
u32 nd_para0;
u8 cs_num;
struct ebu_nand_cs cs[MAX_CS];
};
static inline struct ebu_nand_controller *nand_to_ebu(struct nand_chip *chip)
{
return container_of(chip, struct ebu_nand_controller, chip);
}
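/*
* Poll EBU_WAIT until the chip reports ready or write-complete. Note
* that the last readl_poll_timeout() argument is a timeout in
* microseconds, which is why ebu_nand_exec_op() multiplies the
* framework's millisecond value by 1000 before landing here.
*/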
static int ebu_nand_waitrdy(struct nand_chip *chip, int timeout_ms)
{
struct ebu_nand_controller *ctrl = nand_to_ebu(chip);
u32 status;
return readl_poll_timeout(ctrl->ebu + EBU_WAIT, status,
(status & EBU_WAIT_RDBY) ||
(status & EBU_WAIT_WR_C), 20, timeout_ms);
}
static u8 ebu_nand_readb(struct nand_chip *chip)
{
struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
u8 cs_num = ebu_host->cs_num;
u8 val;
val = readb(ebu_host->cs[cs_num].chipaddr + HSNAND_CS_OFFS);
ebu_nand_waitrdy(chip, 1000);
return val;
}
static void ebu_nand_writeb(struct nand_chip *chip, u32 offset, u8 value)
{
struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
u8 cs_num = ebu_host->cs_num;
writeb(value, ebu_host->cs[cs_num].chipaddr + offset);
ebu_nand_waitrdy(chip, 1000);
}
static void ebu_read_buf(struct nand_chip *chip, u_char *buf, unsigned int len)
{
int i;
for (i = 0; i < len; i++)
buf[i] = ebu_nand_readb(chip);
}
static void ebu_write_buf(struct nand_chip *chip, const u_char *buf, int len)
{
int i;
for (i = 0; i < len; i++)
ebu_nand_writeb(chip, HSNAND_CS_OFFS, buf[i]);
}
static void ebu_nand_disable(struct nand_chip *chip)
{
struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
writel(0, ebu_host->ebu + EBU_CON);
}
static void ebu_select_chip(struct nand_chip *chip)
{
struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
void __iomem *nand_con = ebu_host->ebu + EBU_CON;
u32 cs = ebu_host->cs_num;
writel(EBU_CON_NANDM_EN | EBU_CON_CSMUX_E_EN | EBU_CON_CS_P_LOW |
EBU_CON_SE_P_LOW | EBU_CON_WP_P_LOW | EBU_CON_PRE_P_LOW |
EBU_CON_IN_CS_S(cs) | EBU_CON_OUT_CS_S(cs) |
EBU_CON_LAT_EN_CS_P, nand_con);
}
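/*
* Translate ONFI SDR timings into EBU_BUSCON cycle counts. The SDR
* timing fields are expressed in picoseconds and, with the rate in
* MHz, DIV_ROUND_UP(USEC_PER_SEC, rate) conveniently yields the bus
* clock period in picoseconds (e.g. 100 MHz -> 10000 ps), keeping the
* divisions below unit-consistent.
*/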
static int ebu_nand_set_timings(struct nand_chip *chip, int csline,
const struct nand_interface_config *conf)
{
struct ebu_nand_controller *ctrl = nand_to_ebu(chip);
unsigned int rate = clk_get_rate(ctrl->clk) / HZ_PER_MHZ;
unsigned int period = DIV_ROUND_UP(USEC_PER_SEC, rate);
const struct nand_sdr_timings *timings;
u32 trecov, thold, twrwait, trdwait;
u32 reg = 0;
timings = nand_get_sdr_timings(conf);
if (IS_ERR(timings))
return PTR_ERR(timings);
if (csline == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
trecov = DIV_ROUND_UP(max(timings->tREA_max, timings->tREH_min),
period);
reg |= EBU_BUSCON_RECOVC(trecov);
thold = DIV_ROUND_UP(max(timings->tDH_min, timings->tDS_min), period);
reg |= EBU_BUSCON_HOLDC(thold);
trdwait = DIV_ROUND_UP(max(timings->tRC_min, timings->tREH_min),
period);
reg |= EBU_BUSCON_WAITRDC(trdwait);
twrwait = DIV_ROUND_UP(max(timings->tWC_min, timings->tWH_min), period);
reg |= EBU_BUSCON_WAITWRC(twrwait);
reg |= EBU_BUSCON_CMULT_V4 | EBU_BUSCON_BCGEN_CS | EBU_BUSCON_ALEC |
EBU_BUSCON_SETUP_EN;
writel(reg, ctrl->ebu + EBU_BUSCON(ctrl->cs_num));
return 0;
}
static int ebu_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (section)
return -ERANGE;
oobregion->offset = HSNAND_ECC_OFFSET;
oobregion->length = chip->ecc.total;
return 0;
}
static int ebu_nand_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (section)
return -ERANGE;
oobregion->offset = chip->ecc.total + HSNAND_ECC_OFFSET;
oobregion->length = mtd->oobsize - oobregion->offset;
return 0;
}
static const struct mtd_ooblayout_ops ebu_nand_ooblayout_ops = {
.ecc = ebu_nand_ooblayout_ecc,
.free = ebu_nand_ooblayout_free,
};
static void ebu_dma_rx_callback(void *cookie)
{
struct ebu_nand_controller *ebu_host = cookie;
dmaengine_terminate_async(ebu_host->dma_rx);
complete(&ebu_host->dma_access_complete);
}
static void ebu_dma_tx_callback(void *cookie)
{
struct ebu_nand_controller *ebu_host = cookie;
dmaengine_terminate_async(ebu_host->dma_tx);
complete(&ebu_host->dma_access_complete);
}
static int ebu_dma_start(struct ebu_nand_controller *ebu_host, u32 dir,
const u8 *buf, u32 len)
{
struct dma_async_tx_descriptor *tx;
struct completion *dma_completion;
dma_async_tx_callback callback;
struct dma_chan *chan;
dma_cookie_t cookie;
unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
dma_addr_t buf_dma;
int ret;
u32 timeout;
if (dir == DMA_DEV_TO_MEM) {
chan = ebu_host->dma_rx;
dma_completion = &ebu_host->dma_access_complete;
callback = ebu_dma_rx_callback;
} else {
chan = ebu_host->dma_tx;
dma_completion = &ebu_host->dma_access_complete;
callback = ebu_dma_tx_callback;
}
buf_dma = dma_map_single(chan->device->dev, (void *)buf, len, dir);
if (dma_mapping_error(chan->device->dev, buf_dma)) {
/* Nothing has been mapped yet, do not go through err_unmap */
dev_err(ebu_host->dev, "Failed to map DMA buffer\n");
return -EIO;
}
tx = dmaengine_prep_slave_single(chan, buf_dma, len, dir, flags);
if (!tx) {
ret = -ENXIO;
goto err_unmap;
}
tx->callback = callback;
tx->callback_param = ebu_host;
cookie = tx->tx_submit(tx);
ret = dma_submit_error(cookie);
if (ret) {
dev_err(ebu_host->dev, "dma_submit_error %d\n", cookie);
ret = -EIO;
goto err_unmap;
}
init_completion(dma_completion);
dma_async_issue_pending(chan);
/* Wait for the DMA to finish the data transfer. */
timeout = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
if (!timeout) {
dev_err(ebu_host->dev, "I/O Error in DMA RX (status %d)\n",
dmaengine_tx_status(chan, cookie, NULL));
dmaengine_terminate_sync(chan);
ret = -ETIMEDOUT;
goto err_unmap;
}
return 0;
err_unmap:
dma_unmap_single(chan->device->dev, buf_dma, len, dir);
return ret;
}
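/*
* Program the HSNAND engine for a page transfer: the command opcode
* and the low page-address byte go into CTL1, the upper page-address
* bytes into CTL2, then the GO bit starts the state machine with ECC
* enabled.
*/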
static void ebu_nand_trigger(struct ebu_nand_controller *ebu_host,
int page, u32 cmd)
{
unsigned int val;
val = cmd | (page & 0xFF) << HSNAND_CTL1_ADDR_SHIFT;
writel(val, ebu_host->hsnand + HSNAND_CTL1);
val = (page & 0xFFFF00) >> 8 | HSNAND_CTL2_CYC_N_V5;
writel(val, ebu_host->hsnand + HSNAND_CTL2);
writel(ebu_host->nd_para0, ebu_host->hsnand + HSNAND_PARA0);
/* clear first, will update later */
writel(0xFFFFFFFF, ebu_host->hsnand + HSNAND_CMSG_0);
writel(0xFFFFFFFF, ebu_host->hsnand + HSNAND_CMSG_1);
writel(HSNAND_INT_MSK_CTL_WR_C,
ebu_host->hsnand + HSNAND_INT_MSK_CTL);
if (!cmd)
val = HSNAND_CTL_RW_READ;
else
val = HSNAND_CTL_RW_WRITE;
writel(HSNAND_CTL_MSG_EN | HSNAND_CTL_CKFF_EN |
HSNAND_CTL_ECC_OFF_V8TH | HSNAND_CTL_CE_SEL_CS(ebu_host->cs_num) |
HSNAND_CTL_ENABLE_ECC | HSNAND_CTL_GO | val,
ebu_host->hsnand + HSNAND_CTL);
}
static int ebu_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
int ret, reg_data;
ebu_nand_trigger(ebu_host, page, NAND_CMD_READ0);
ret = ebu_dma_start(ebu_host, DMA_DEV_TO_MEM, buf, mtd->writesize);
if (ret)
return ret;
if (oob_required)
chip->ecc.read_oob(chip, page);
reg_data = readl(ebu_host->hsnand + HSNAND_CTL);
reg_data &= ~HSNAND_CTL_GO;
writel(reg_data, ebu_host->hsnand + HSNAND_CTL);
return 0;
}
static int ebu_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
void __iomem *int_sta = ebu_host->hsnand + HSNAND_INT_STA;
int reg_data, ret, val;
u32 reg;
ebu_nand_trigger(ebu_host, page, NAND_CMD_SEQIN);
ret = ebu_dma_start(ebu_host, DMA_MEM_TO_DEV, buf, mtd->writesize);
if (ret)
return ret;
if (oob_required) {
reg = get_unaligned_le32(chip->oob_poi);
writel(reg, ebu_host->hsnand + HSNAND_CMSG_0);
reg = get_unaligned_le32(chip->oob_poi + 4);
writel(reg, ebu_host->hsnand + HSNAND_CMSG_1);
}
ret = readl_poll_timeout_atomic(int_sta, val, !(val & HSNAND_INT_STA_WR_C),
10, 1000);
if (ret)
return ret;
reg_data = readl(ebu_host->hsnand + HSNAND_CTL);
reg_data &= ~HSNAND_CTL_GO;
writel(reg_data, ebu_host->hsnand + HSNAND_CTL);
return 0;
}
static const u8 ecc_strength[] = { 1, 1, 4, 8, 24, 32, 40, 60, };
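/*
* The BCH mode index programmed into PARA0 (bits 31:29, "val" below)
* is the position of the configured strength in ecc_strength[] above.
* Worked example: a 512-byte step with strength 4 matches index 2 and
* needs DIV_ROUND_UP(4 * fls(8 * 512), 8) = 7 ECC bytes per step.
*/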
static int ebu_nand_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip);
u32 ecc_steps, ecc_bytes, ecc_total, pagesize, pg_per_blk;
u32 ecc_strength_ds = chip->ecc.strength;
u32 ecc_size = chip->ecc.size;
u32 writesize = mtd->writesize;
u32 blocksize = mtd->erasesize;
int bch_algo, start, val;
/* Default to an ECC size of 512 */
if (!chip->ecc.size)
chip->ecc.size = 512;
switch (ecc_size) {
case 512:
start = 1;
if (!ecc_strength_ds)
ecc_strength_ds = 4;
break;
case 1024:
start = 4;
if (!ecc_strength_ds)
ecc_strength_ds = 32;
break;
default:
return -EINVAL;
}
/* BCH ECC algorithm Settings for number of bits per 512B/1024B */
bch_algo = round_up(start + 1, 4);
for (val = start; val < bch_algo; val++) {
if (ecc_strength_ds == ecc_strength[val])
break;
}
if (val == bch_algo)
return -EINVAL;
if (ecc_strength_ds == 8)
ecc_bytes = 14;
else
ecc_bytes = DIV_ROUND_UP(ecc_strength_ds * fls(8 * ecc_size), 8);
ecc_steps = writesize / ecc_size;
ecc_total = ecc_steps * ecc_bytes;
if ((ecc_total + 8) > mtd->oobsize)
return -ERANGE;
chip->ecc.total = ecc_total;
pagesize = fls(writesize >> 11);
if (pagesize > HSNAND_PARA0_PAGE_V8192)
return -ERANGE;
pg_per_blk = fls((blocksize / writesize) >> 6) / 8;
if (pg_per_blk > HSNAND_PARA0_PIB_V256)
return -ERANGE;
ebu_host->nd_para0 = pagesize | pg_per_blk | HSNAND_PARA0_BYP_EN_NP |
HSNAND_PARA0_BYP_DEC_NP | HSNAND_PARA0_ADEP_EN |
HSNAND_PARA0_TYPE_ONFI | (val << 29);
mtd_set_ooblayout(mtd, &ebu_nand_ooblayout_ops);
chip->ecc.read_page = ebu_nand_read_page_hwecc;
chip->ecc.write_page = ebu_nand_write_page_hwecc;
return 0;
}
static int ebu_nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op, bool check_only)
{
const struct nand_op_instr *instr = NULL;
unsigned int op_id;
int i, timeout_ms, ret = 0;
if (check_only)
return 0;
ebu_select_chip(chip);
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];
switch (instr->type) {
case NAND_OP_CMD_INSTR:
ebu_nand_writeb(chip, HSNAND_CLE_OFFS | HSNAND_CS_OFFS,
instr->ctx.cmd.opcode);
break;
case NAND_OP_ADDR_INSTR:
for (i = 0; i < instr->ctx.addr.naddrs; i++)
ebu_nand_writeb(chip,
HSNAND_ALE_OFFS | HSNAND_CS_OFFS,
instr->ctx.addr.addrs[i]);
break;
case NAND_OP_DATA_IN_INSTR:
ebu_read_buf(chip, instr->ctx.data.buf.in,
instr->ctx.data.len);
break;
case NAND_OP_DATA_OUT_INSTR:
ebu_write_buf(chip, instr->ctx.data.buf.out,
instr->ctx.data.len);
break;
case NAND_OP_WAITRDY_INSTR:
timeout_ms = instr->ctx.waitrdy.timeout_ms * 1000;
ret = ebu_nand_waitrdy(chip, timeout_ms);
break;
}
}
return ret;
}
static const struct nand_controller_ops ebu_nand_controller_ops = {
.attach_chip = ebu_nand_attach_chip,
.setup_interface = ebu_nand_set_timings,
.exec_op = ebu_nand_exec_op,
};
static void ebu_dma_cleanup(struct ebu_nand_controller *ebu_host)
{
if (ebu_host->dma_rx)
dma_release_channel(ebu_host->dma_rx);
if (ebu_host->dma_tx)
dma_release_channel(ebu_host->dma_tx);
}
static int ebu_nand_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ebu_nand_controller *ebu_host;
struct device_node *chip_np;
struct nand_chip *nand;
struct mtd_info *mtd;
struct resource *res;
char *resname;
int ret;
u32 cs;
ebu_host = devm_kzalloc(dev, sizeof(*ebu_host), GFP_KERNEL);
if (!ebu_host)
return -ENOMEM;
ebu_host->dev = dev;
nand_controller_init(&ebu_host->controller);
ebu_host->ebu = devm_platform_ioremap_resource_byname(pdev, "ebunand");
if (IS_ERR(ebu_host->ebu))
return PTR_ERR(ebu_host->ebu);
ebu_host->hsnand = devm_platform_ioremap_resource_byname(pdev, "hsnand");
if (IS_ERR(ebu_host->hsnand))
return PTR_ERR(ebu_host->hsnand);
chip_np = of_get_next_child(dev->of_node, NULL);
if (!chip_np)
return dev_err_probe(dev, -EINVAL,
"Could not find child node for the NAND chip\n");
ret = of_property_read_u32(chip_np, "reg", &cs);
if (ret) {
dev_err(dev, "failed to get chip select: %d\n", ret);
goto err_of_node_put;
}
if (cs >= MAX_CS) {
dev_err(dev, "got invalid chip select: %d\n", cs);
ret = -EINVAL;
goto err_of_node_put;
}
ebu_host->cs_num = cs;
resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
ebu_host->cs[cs].chipaddr = devm_platform_ioremap_resource_byname(pdev,
resname);
if (IS_ERR(ebu_host->cs[cs].chipaddr)) {
ret = PTR_ERR(ebu_host->cs[cs].chipaddr);
goto err_of_node_put;
}
ebu_host->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(ebu_host->clk)) {
ret = dev_err_probe(dev, PTR_ERR(ebu_host->clk),
"failed to get and enable clock\n");
goto err_of_node_put;
}
ebu_host->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(ebu_host->dma_tx)) {
ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx),
"failed to request DMA tx chan!.\n");
goto err_of_node_put;
}
ebu_host->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR(ebu_host->dma_rx)) {
ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_rx),
"failed to request DMA rx chan!.\n");
ebu_host->dma_rx = NULL;
goto err_cleanup_dma;
}
resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
if (!res) {
ret = -EINVAL;
goto err_cleanup_dma;
}
ebu_host->cs[cs].addr_sel = res->start;
writel(ebu_host->cs[cs].addr_sel | EBU_ADDR_MASK(5) | EBU_ADDR_SEL_REGEN,
ebu_host->ebu + EBU_ADDR_SEL(cs));
nand_set_flash_node(&ebu_host->chip, chip_np);
mtd = nand_to_mtd(&ebu_host->chip);
if (!mtd->name) {
dev_err(ebu_host->dev, "NAND label property is mandatory\n");
ret = -EINVAL;
goto err_cleanup_dma;
}
mtd->dev.parent = dev;
ebu_host->dev = dev;
platform_set_drvdata(pdev, ebu_host);
nand_set_controller_data(&ebu_host->chip, ebu_host);
nand = &ebu_host->chip;
nand->controller = &ebu_host->controller;
nand->controller->ops = &ebu_nand_controller_ops;
/* Scan to find existence of the device */
ret = nand_scan(&ebu_host->chip, 1);
if (ret)
goto err_cleanup_dma;
ret = mtd_device_register(mtd, NULL, 0);
if (ret)
goto err_clean_nand;
return 0;
err_clean_nand:
nand_cleanup(&ebu_host->chip);
err_cleanup_dma:
ebu_dma_cleanup(ebu_host);
err_of_node_put:
of_node_put(chip_np);
return ret;
}
static void ebu_nand_remove(struct platform_device *pdev)
{
struct ebu_nand_controller *ebu_host = platform_get_drvdata(pdev);
int ret;
ret = mtd_device_unregister(nand_to_mtd(&ebu_host->chip));
WARN_ON(ret);
nand_cleanup(&ebu_host->chip);
ebu_nand_disable(&ebu_host->chip);
ebu_dma_cleanup(ebu_host);
}
static const struct of_device_id ebu_nand_match[] = {
{ .compatible = "intel,lgm-ebunand" },
{}
};
MODULE_DEVICE_TABLE(of, ebu_nand_match);
static struct platform_driver ebu_nand_driver = {
.probe = ebu_nand_probe,
.remove_new = ebu_nand_remove,
.driver = {
.name = "intel-nand-controller",
.of_match_table = ebu_nand_match,
},
};
module_platform_driver(ebu_nand_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vadivel Murugan R <[email protected]>");
MODULE_DESCRIPTION("Intel's LGM External Bus NAND Controller driver");
| linux-master | drivers/mtd/nand/raw/intel-nand-controller.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Macronix International Co., Ltd.
*
* Author:
* Mason Yang <[email protected]>
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
#include <linux/platform_device.h>
#include "internals.h"
#define HC_CFG 0x0
#define HC_CFG_IF_CFG(x) ((x) << 27)
#define HC_CFG_DUAL_SLAVE BIT(31)
#define HC_CFG_INDIVIDUAL BIT(30)
#define HC_CFG_NIO(x) (((x) / 4) << 27)
#define HC_CFG_TYPE(s, t) ((t) << (23 + ((s) * 2)))
#define HC_CFG_TYPE_SPI_NOR 0
#define HC_CFG_TYPE_SPI_NAND 1
#define HC_CFG_TYPE_SPI_RAM 2
#define HC_CFG_TYPE_RAW_NAND 3
#define HC_CFG_SLV_ACT(x) ((x) << 21)
#define HC_CFG_CLK_PH_EN BIT(20)
#define HC_CFG_CLK_POL_INV BIT(19)
#define HC_CFG_BIG_ENDIAN BIT(18)
#define HC_CFG_DATA_PASS BIT(17)
#define HC_CFG_IDLE_SIO_LVL(x) ((x) << 16)
#define HC_CFG_MAN_START_EN BIT(3)
#define HC_CFG_MAN_START BIT(2)
#define HC_CFG_MAN_CS_EN BIT(1)
#define HC_CFG_MAN_CS_ASSERT BIT(0)
#define INT_STS 0x4
#define INT_STS_EN 0x8
#define INT_SIG_EN 0xc
#define INT_STS_ALL GENMASK(31, 0)
#define INT_RDY_PIN BIT(26)
#define INT_RDY_SR BIT(25)
#define INT_LNR_SUSP BIT(24)
#define INT_ECC_ERR BIT(17)
#define INT_CRC_ERR BIT(16)
#define INT_LWR_DIS BIT(12)
#define INT_LRD_DIS BIT(11)
#define INT_SDMA_INT BIT(10)
#define INT_DMA_FINISH BIT(9)
#define INT_RX_NOT_FULL BIT(3)
#define INT_RX_NOT_EMPTY BIT(2)
#define INT_TX_NOT_FULL BIT(1)
#define INT_TX_EMPTY BIT(0)
#define HC_EN 0x10
#define HC_EN_BIT BIT(0)
#define TXD(x) (0x14 + ((x) * 4))
#define RXD 0x24
#define SS_CTRL(s) (0x30 + ((s) * 4))
#define LRD_CFG 0x44
#define LWR_CFG 0x80
#define RWW_CFG 0x70
#define OP_READ BIT(23)
#define OP_DUMMY_CYC(x) ((x) << 17)
#define OP_ADDR_BYTES(x) ((x) << 14)
#define OP_CMD_BYTES(x) (((x) - 1) << 13)
#define OP_OCTA_CRC_EN BIT(12)
#define OP_DQS_EN BIT(11)
#define OP_ENHC_EN BIT(10)
#define OP_PREAMBLE_EN BIT(9)
#define OP_DATA_DDR BIT(8)
#define OP_DATA_BUSW(x) ((x) << 6)
#define OP_ADDR_DDR BIT(5)
#define OP_ADDR_BUSW(x) ((x) << 3)
#define OP_CMD_DDR BIT(2)
#define OP_CMD_BUSW(x) (x)
#define OP_BUSW_1 0
#define OP_BUSW_2 1
#define OP_BUSW_4 2
#define OP_BUSW_8 3
#define OCTA_CRC 0x38
#define OCTA_CRC_IN_EN(s) BIT(3 + ((s) * 16))
#define OCTA_CRC_CHUNK(s, x) ((fls((x) / 32)) << (1 + ((s) * 16)))
#define OCTA_CRC_OUT_EN(s) BIT(0 + ((s) * 16))
#define ONFI_DIN_CNT(s) (0x3c + (s))
#define LRD_CTRL 0x48
#define RWW_CTRL 0x74
#define LWR_CTRL 0x84
#define LMODE_EN BIT(31)
#define LMODE_SLV_ACT(x) ((x) << 21)
#define LMODE_CMD1(x) ((x) << 8)
#define LMODE_CMD0(x) (x)
#define LRD_ADDR 0x4c
#define LWR_ADDR 0x88
#define LRD_RANGE 0x50
#define LWR_RANGE 0x8c
#define AXI_SLV_ADDR 0x54
#define DMAC_RD_CFG 0x58
#define DMAC_WR_CFG 0x94
#define DMAC_CFG_PERIPH_EN BIT(31)
#define DMAC_CFG_ALLFLUSH_EN BIT(30)
#define DMAC_CFG_LASTFLUSH_EN BIT(29)
#define DMAC_CFG_QE(x) (((x) + 1) << 16)
#define DMAC_CFG_BURST_LEN(x) (((x) + 1) << 12)
#define DMAC_CFG_BURST_SZ(x) ((x) << 8)
#define DMAC_CFG_DIR_READ BIT(1)
#define DMAC_CFG_START BIT(0)
#define DMAC_RD_CNT 0x5c
#define DMAC_WR_CNT 0x98
#define SDMA_ADDR 0x60
#define DMAM_CFG 0x64
#define DMAM_CFG_START BIT(31)
#define DMAM_CFG_CONT BIT(30)
#define DMAM_CFG_SDMA_GAP(x) (fls((x) / 8192) << 2)
#define DMAM_CFG_DIR_READ BIT(1)
#define DMAM_CFG_EN BIT(0)
#define DMAM_CNT 0x68
#define LNR_TIMER_TH 0x6c
#define RDM_CFG0 0x78
#define RDM_CFG0_POLY(x) (x)
#define RDM_CFG1 0x7c
#define RDM_CFG1_RDM_EN BIT(31)
#define RDM_CFG1_SEED(x) (x)
#define LWR_SUSP_CTRL 0x90
#define LWR_SUSP_CTRL_EN BIT(31)
#define DMAS_CTRL 0x9c
#define DMAS_CTRL_EN BIT(31)
#define DMAS_CTRL_DIR_READ BIT(30)
#define DATA_STROB 0xa0
#define DATA_STROB_EDO_EN BIT(2)
#define DATA_STROB_INV_POL BIT(1)
#define DATA_STROB_DELAY_2CYC BIT(0)
#define IDLY_CODE(x) (0xa4 + ((x) * 4))
#define IDLY_CODE_VAL(x, v) ((v) << (((x) % 4) * 8))
#define GPIO 0xc4
#define GPIO_PT(x) BIT(3 + ((x) * 16))
#define GPIO_RESET(x) BIT(2 + ((x) * 16))
#define GPIO_HOLDB(x) BIT(1 + ((x) * 16))
#define GPIO_WPB(x) BIT((x) * 16)
#define HC_VER 0xd0
#define HW_TEST(x) (0xe0 + ((x) * 4))
#define MXIC_NFC_MAX_CLK_HZ 50000000
#define IRQ_TIMEOUT 1000
struct mxic_nand_ctlr {
struct clk *ps_clk;
struct clk *send_clk;
struct clk *send_dly_clk;
struct completion complete;
void __iomem *regs;
struct nand_controller controller;
struct device *dev;
struct nand_chip chip;
};
static int mxic_nfc_clk_enable(struct mxic_nand_ctlr *nfc)
{
int ret;
ret = clk_prepare_enable(nfc->ps_clk);
if (ret)
return ret;
ret = clk_prepare_enable(nfc->send_clk);
if (ret)
goto err_ps_clk;
ret = clk_prepare_enable(nfc->send_dly_clk);
if (ret)
goto err_send_dly_clk;
return ret;
err_send_dly_clk:
clk_disable_unprepare(nfc->send_clk);
err_ps_clk:
clk_disable_unprepare(nfc->ps_clk);
return ret;
}
static void mxic_nfc_clk_disable(struct mxic_nand_ctlr *nfc)
{
clk_disable_unprepare(nfc->send_clk);
clk_disable_unprepare(nfc->send_dly_clk);
clk_disable_unprepare(nfc->ps_clk);
}
static void mxic_nfc_set_input_delay(struct mxic_nand_ctlr *nfc, u8 idly_code)
{
writel(IDLY_CODE_VAL(0, idly_code) |
IDLY_CODE_VAL(1, idly_code) |
IDLY_CODE_VAL(2, idly_code) |
IDLY_CODE_VAL(3, idly_code),
nfc->regs + IDLY_CODE(0));
writel(IDLY_CODE_VAL(4, idly_code) |
IDLY_CODE_VAL(5, idly_code) |
IDLY_CODE_VAL(6, idly_code) |
IDLY_CODE_VAL(7, idly_code),
nfc->regs + IDLY_CODE(1));
}
static int mxic_nfc_clk_setup(struct mxic_nand_ctlr *nfc, unsigned long freq)
{
int ret;
ret = clk_set_rate(nfc->send_clk, freq);
if (ret)
return ret;
ret = clk_set_rate(nfc->send_dly_clk, freq);
if (ret)
return ret;
/*
* The input delay code ranges from 0x0 to 0x1F in steps of 78 ps,
* giving a maximum input delay of 2.418 ns.
*/
mxic_nfc_set_input_delay(nfc, 0xf);
/*
* Phase degree = 360 * freq * output-delay
* where output-delay is a constant value 1 ns in FPGA.
*
* Get Phase degree = 360 * freq * 1 ns
* = 360 * freq * 1 sec / 1000000000
* = 9 * freq / 25000000
*/
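/* e.g. at the 50 MHz ceiling: 9 * 50000000 / 25000000 = 18 degrees */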
ret = clk_set_phase(nfc->send_dly_clk, 9 * freq / 25000000);
if (ret)
return ret;
return 0;
}
static int mxic_nfc_set_freq(struct mxic_nand_ctlr *nfc, unsigned long freq)
{
int ret;
if (freq > MXIC_NFC_MAX_CLK_HZ)
freq = MXIC_NFC_MAX_CLK_HZ;
mxic_nfc_clk_disable(nfc);
ret = mxic_nfc_clk_setup(nfc, freq);
if (ret)
return ret;
ret = mxic_nfc_clk_enable(nfc);
if (ret)
return ret;
return 0;
}
static irqreturn_t mxic_nfc_isr(int irq, void *dev_id)
{
struct mxic_nand_ctlr *nfc = dev_id;
u32 sts;
sts = readl(nfc->regs + INT_STS);
if (sts & INT_RDY_PIN)
complete(&nfc->complete);
else
return IRQ_NONE;
return IRQ_HANDLED;
}
static void mxic_nfc_hw_init(struct mxic_nand_ctlr *nfc)
{
writel(HC_CFG_NIO(8) | HC_CFG_TYPE(1, HC_CFG_TYPE_RAW_NAND) |
HC_CFG_SLV_ACT(0) | HC_CFG_MAN_CS_EN |
HC_CFG_IDLE_SIO_LVL(1), nfc->regs + HC_CFG);
writel(INT_STS_ALL, nfc->regs + INT_STS_EN);
writel(INT_RDY_PIN, nfc->regs + INT_SIG_EN);
writel(0x0, nfc->regs + ONFI_DIN_CNT(0));
writel(0, nfc->regs + LRD_CFG);
writel(0, nfc->regs + LRD_CTRL);
writel(0x0, nfc->regs + HC_EN);
}
static void mxic_nfc_cs_enable(struct mxic_nand_ctlr *nfc)
{
writel(readl(nfc->regs + HC_CFG) | HC_CFG_MAN_CS_EN,
nfc->regs + HC_CFG);
writel(HC_CFG_MAN_CS_ASSERT | readl(nfc->regs + HC_CFG),
nfc->regs + HC_CFG);
}
static void mxic_nfc_cs_disable(struct mxic_nand_ctlr *nfc)
{
writel(~HC_CFG_MAN_CS_ASSERT & readl(nfc->regs + HC_CFG),
nfc->regs + HC_CFG);
}
static int mxic_nfc_wait_ready(struct nand_chip *chip)
{
struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip);
int ret;
ret = wait_for_completion_timeout(&nfc->complete,
msecs_to_jiffies(IRQ_TIMEOUT));
if (!ret) {
dev_err(nfc->dev, "nand device timeout\n");
return -ETIMEDOUT;
}
return 0;
}
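/*
* PIO data path: transfers go through the 32-bit TX FIFO and come back
* through the RX FIFO, at most 4 bytes per round. The TXD register
* index encodes the number of valid bytes (nbytes % 4), and RXD is
* drained on every round - even for pure writes - so the RX FIFO never
* fills up.
*/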
static int mxic_nfc_data_xfer(struct mxic_nand_ctlr *nfc, const void *txbuf,
void *rxbuf, unsigned int len)
{
unsigned int pos = 0;
while (pos < len) {
unsigned int nbytes = len - pos;
u32 data = 0xffffffff;
u32 sts;
int ret;
if (nbytes > 4)
nbytes = 4;
if (txbuf)
memcpy(&data, txbuf + pos, nbytes);
ret = readl_poll_timeout(nfc->regs + INT_STS, sts,
sts & INT_TX_EMPTY, 0, USEC_PER_SEC);
if (ret)
return ret;
writel(data, nfc->regs + TXD(nbytes % 4));
ret = readl_poll_timeout(nfc->regs + INT_STS, sts,
sts & INT_TX_EMPTY, 0, USEC_PER_SEC);
if (ret)
return ret;
ret = readl_poll_timeout(nfc->regs + INT_STS, sts,
sts & INT_RX_NOT_EMPTY, 0,
USEC_PER_SEC);
if (ret)
return ret;
data = readl(nfc->regs + RXD);
if (rxbuf) {
data >>= (8 * (4 - nbytes));
memcpy(rxbuf + pos, &data, nbytes);
}
if (readl(nfc->regs + INT_STS) & INT_RX_NOT_EMPTY)
dev_warn(nfc->dev, "RX FIFO not empty\n");
pos += nbytes;
}
return 0;
}
static int mxic_nfc_exec_op(struct nand_chip *chip,
const struct nand_operation *op, bool check_only)
{
struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip);
const struct nand_op_instr *instr = NULL;
int ret = 0;
unsigned int op_id;
if (check_only)
return 0;
mxic_nfc_cs_enable(nfc);
init_completion(&nfc->complete);
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];
switch (instr->type) {
case NAND_OP_CMD_INSTR:
writel(0, nfc->regs + HC_EN);
writel(HC_EN_BIT, nfc->regs + HC_EN);
writel(OP_CMD_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F) |
OP_CMD_BYTES(0), nfc->regs + SS_CTRL(0));
ret = mxic_nfc_data_xfer(nfc,
&instr->ctx.cmd.opcode,
NULL, 1);
break;
case NAND_OP_ADDR_INSTR:
writel(OP_ADDR_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F) |
OP_ADDR_BYTES(instr->ctx.addr.naddrs),
nfc->regs + SS_CTRL(0));
ret = mxic_nfc_data_xfer(nfc,
instr->ctx.addr.addrs, NULL,
instr->ctx.addr.naddrs);
break;
case NAND_OP_DATA_IN_INSTR:
writel(0x0, nfc->regs + ONFI_DIN_CNT(0));
writel(OP_DATA_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F) |
OP_READ, nfc->regs + SS_CTRL(0));
ret = mxic_nfc_data_xfer(nfc, NULL,
instr->ctx.data.buf.in,
instr->ctx.data.len);
break;
case NAND_OP_DATA_OUT_INSTR:
writel(instr->ctx.data.len,
nfc->regs + ONFI_DIN_CNT(0));
writel(OP_DATA_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F),
nfc->regs + SS_CTRL(0));
ret = mxic_nfc_data_xfer(nfc,
instr->ctx.data.buf.out, NULL,
instr->ctx.data.len);
break;
case NAND_OP_WAITRDY_INSTR:
ret = mxic_nfc_wait_ready(chip);
break;
}
}
mxic_nfc_cs_disable(nfc);
return ret;
}
static int mxic_nfc_setup_interface(struct nand_chip *chip, int chipnr,
const struct nand_interface_config *conf)
{
struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip);
const struct nand_sdr_timings *sdr;
unsigned long freq;
int ret;
sdr = nand_get_sdr_timings(conf);
if (IS_ERR(sdr))
return PTR_ERR(sdr);
if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
freq = NSEC_PER_SEC / (sdr->tRC_min / 1000);
ret = mxic_nfc_set_freq(nfc, freq);
if (ret)
dev_err(nfc->dev, "set freq:%ld failed\n", freq);
if (sdr->tRC_min < 30000)
writel(DATA_STROB_EDO_EN, nfc->regs + DATA_STROB);
return 0;
}
static const struct nand_controller_ops mxic_nand_controller_ops = {
.exec_op = mxic_nfc_exec_op,
.setup_interface = mxic_nfc_setup_interface,
};
static int mxic_nfc_probe(struct platform_device *pdev)
{
struct device_node *nand_np, *np = pdev->dev.of_node;
struct mtd_info *mtd;
struct mxic_nand_ctlr *nfc;
struct nand_chip *nand_chip;
int err;
int irq;
nfc = devm_kzalloc(&pdev->dev, sizeof(struct mxic_nand_ctlr),
GFP_KERNEL);
if (!nfc)
return -ENOMEM;
nfc->ps_clk = devm_clk_get(&pdev->dev, "ps");
if (IS_ERR(nfc->ps_clk))
return PTR_ERR(nfc->ps_clk);
nfc->send_clk = devm_clk_get(&pdev->dev, "send");
if (IS_ERR(nfc->send_clk))
return PTR_ERR(nfc->send_clk);
nfc->send_dly_clk = devm_clk_get(&pdev->dev, "send_dly");
if (IS_ERR(nfc->send_dly_clk))
return PTR_ERR(nfc->send_dly_clk);
nfc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nfc->regs))
return PTR_ERR(nfc->regs);
nand_chip = &nfc->chip;
mtd = nand_to_mtd(nand_chip);
mtd->dev.parent = &pdev->dev;
for_each_child_of_node(np, nand_np)
nand_set_flash_node(nand_chip, nand_np);
nand_chip->priv = nfc;
nfc->dev = &pdev->dev;
nfc->controller.ops = &mxic_nand_controller_ops;
nand_controller_init(&nfc->controller);
nand_chip->controller = &nfc->controller;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
mxic_nfc_hw_init(nfc);
err = devm_request_irq(&pdev->dev, irq, mxic_nfc_isr,
0, "mxic-nfc", nfc);
if (err)
goto fail;
err = nand_scan(nand_chip, 1);
if (err)
goto fail;
err = mtd_device_register(mtd, NULL, 0);
if (err)
goto fail;
platform_set_drvdata(pdev, nfc);
return 0;
fail:
mxic_nfc_clk_disable(nfc);
return err;
}
static void mxic_nfc_remove(struct platform_device *pdev)
{
struct mxic_nand_ctlr *nfc = platform_get_drvdata(pdev);
struct nand_chip *chip = &nfc->chip;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
mxic_nfc_clk_disable(nfc);
}
static const struct of_device_id mxic_nfc_of_ids[] = {
{ .compatible = "mxic,multi-itfc-v009-nand-controller", },
{},
};
MODULE_DEVICE_TABLE(of, mxic_nfc_of_ids);
static struct platform_driver mxic_nfc_driver = {
.probe = mxic_nfc_probe,
.remove_new = mxic_nfc_remove,
.driver = {
.name = "mxic-nfc",
.of_match_table = mxic_nfc_of_ids,
},
};
module_platform_driver(mxic_nfc_driver);
MODULE_AUTHOR("Mason Yang <[email protected]>");
MODULE_DESCRIPTION("Macronix raw NAND controller driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mtd/nand/raw/mxic_nand.c |
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Amlogic Meson Nand Flash Controller Driver
*
* Copyright (c) 2018 Amlogic, inc.
* Author: Liang Yang <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/mtd.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#define NFC_REG_CMD 0x00
#define NFC_CMD_IDLE (0xc << 14)
#define NFC_CMD_CLE (0x5 << 14)
#define NFC_CMD_ALE (0x6 << 14)
#define NFC_CMD_ADL ((0 << 16) | (3 << 20))
#define NFC_CMD_ADH ((1 << 16) | (3 << 20))
#define NFC_CMD_AIL ((2 << 16) | (3 << 20))
#define NFC_CMD_AIH ((3 << 16) | (3 << 20))
#define NFC_CMD_SEED ((8 << 16) | (3 << 20))
#define NFC_CMD_M2N ((0 << 17) | (2 << 20))
#define NFC_CMD_N2M ((1 << 17) | (2 << 20))
#define NFC_CMD_RB BIT(20)
#define NFC_CMD_SCRAMBLER_ENABLE BIT(19)
#define NFC_CMD_SCRAMBLER_DISABLE 0
#define NFC_CMD_SHORTMODE_DISABLE 0
#define NFC_CMD_RB_INT BIT(14)
#define NFC_CMD_RB_INT_NO_PIN ((0xb << 10) | BIT(18) | BIT(16))
#define NFC_CMD_GET_SIZE(x) (((x) >> 22) & GENMASK(4, 0))
#define NFC_REG_CFG 0x04
#define NFC_REG_DADR 0x08
#define NFC_REG_IADR 0x0c
#define NFC_REG_BUF 0x10
#define NFC_REG_INFO 0x14
#define NFC_REG_DC 0x18
#define NFC_REG_ADR 0x1c
#define NFC_REG_DL 0x20
#define NFC_REG_DH 0x24
#define NFC_REG_CADR 0x28
#define NFC_REG_SADR 0x2c
#define NFC_REG_PINS 0x30
#define NFC_REG_VER 0x38
#define NFC_RB_IRQ_EN BIT(21)
#define CLK_DIV_SHIFT 0
#define CLK_DIV_WIDTH 6
#define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \
( \
(cmd_dir) | \
((ran) << 19) | \
((bch) << 14) | \
((short_mode) << 13) | \
(((page_size) & 0x7f) << 6) | \
((pages) & 0x3f) \
)
#define GENCMDDADDRL(adl, addr) ((adl) | ((addr) & 0xffff))
#define GENCMDDADDRH(adh, addr) ((adh) | (((addr) >> 16) & 0xffff))
#define GENCMDIADDRL(ail, addr) ((ail) | ((addr) & 0xffff))
#define GENCMDIADDRH(aih, addr) ((aih) | (((addr) >> 16) & 0xffff))
#define DMA_DIR(dir) ((dir) ? NFC_CMD_N2M : NFC_CMD_M2N)
#define DMA_ADDR_ALIGN 8
#define ECC_CHECK_RETURN_FF (-1)
#define NAND_CE0 (0xe << 10)
#define NAND_CE1 (0xd << 10)
#define DMA_BUSY_TIMEOUT 0x100000
#define CMD_FIFO_EMPTY_TIMEOUT 1000
#define MAX_CE_NUM 2
/* eMMC clock register, misc control */
#define CLK_SELECT_NAND BIT(31)
#define NFC_CLK_CYCLE 6
/* nand flash controller delay 3 ns */
#define NFC_DEFAULT_DELAY 3000
#define ROW_ADDER(page, index) (((page) >> (8 * (index))) & 0xff)
#define MAX_CYCLE_ADDRS 5
#define DIRREAD 1
#define DIRWRITE 0
#define ECC_PARITY_BCH8_512B 14
#define ECC_COMPLETE BIT(31)
#define ECC_ERR_CNT(x) (((x) >> 24) & GENMASK(5, 0))
#define ECC_ZERO_CNT(x) (((x) >> 16) & GENMASK(5, 0))
#define ECC_UNCORRECTABLE 0x3f
#define PER_INFO_BYTE 8
#define NFC_CMD_RAW_LEN GENMASK(13, 0)
#define NFC_COLUMN_ADDR_0 0
#define NFC_COLUMN_ADDR_1 0
struct meson_nfc_nand_chip {
struct list_head node;
struct nand_chip nand;
unsigned long clk_rate;
unsigned long level1_divider;
u32 bus_timing;
u32 twb;
u32 tadl;
u32 tbers_max;
u32 bch_mode;
u8 *data_buf;
__le64 *info_buf;
u32 nsels;
u8 sels[];
};
struct meson_nand_ecc {
u32 bch;
u32 strength;
u32 size;
};
struct meson_nfc_data {
const struct nand_ecc_caps *ecc_caps;
};
struct meson_nfc_param {
u32 chip_select;
u32 rb_select;
};
struct nand_rw_cmd {
u32 cmd0;
u32 addrs[MAX_CYCLE_ADDRS];
u32 cmd1;
};
struct nand_timing {
u32 twb;
u32 tadl;
u32 tbers_max;
};
struct meson_nfc {
struct nand_controller controller;
struct clk *core_clk;
struct clk *device_clk;
struct clk *nand_clk;
struct clk_divider nand_divider;
unsigned long clk_rate;
u32 bus_timing;
struct device *dev;
void __iomem *reg_base;
void __iomem *reg_clk;
struct completion completion;
struct list_head chips;
const struct meson_nfc_data *data;
struct meson_nfc_param param;
struct nand_timing timing;
union {
int cmd[32];
struct nand_rw_cmd rw;
} cmdfifo;
dma_addr_t daddr;
dma_addr_t iaddr;
u32 info_bytes;
unsigned long assigned_cs;
bool no_rb_pin;
};
enum {
NFC_ECC_BCH8_512 = 1,
NFC_ECC_BCH8_1K,
NFC_ECC_BCH24_1K,
NFC_ECC_BCH30_1K,
NFC_ECC_BCH40_1K,
NFC_ECC_BCH50_1K,
NFC_ECC_BCH60_1K,
};
#define MESON_ECC_DATA(b, s, sz) { .bch = (b), .strength = (s), .size = (sz) }
static struct meson_nand_ecc meson_ecc[] = {
MESON_ECC_DATA(NFC_ECC_BCH8_512, 8, 512),
MESON_ECC_DATA(NFC_ECC_BCH8_1K, 8, 1024),
MESON_ECC_DATA(NFC_ECC_BCH24_1K, 24, 1024),
MESON_ECC_DATA(NFC_ECC_BCH30_1K, 30, 1024),
MESON_ECC_DATA(NFC_ECC_BCH40_1K, 40, 1024),
MESON_ECC_DATA(NFC_ECC_BCH50_1K, 50, 1024),
MESON_ECC_DATA(NFC_ECC_BCH60_1K, 60, 1024),
};
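/*
* Parity bytes per ECC step, rounded up to an even count by the
* ALIGN() below. Worked example: BCH8 over a 512-byte step needs
* 8 * fls(512 * 8) = 104 parity bits -> 13 bytes -> 14 once aligned,
* which matches the special-cased ECC_PARITY_BCH8_512B value.
*/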
static int meson_nand_calc_ecc_bytes(int step_size, int strength)
{
int ecc_bytes;
if (step_size == 512 && strength == 8)
return ECC_PARITY_BCH8_512B;
ecc_bytes = DIV_ROUND_UP(strength * fls(step_size * 8), 8);
ecc_bytes = ALIGN(ecc_bytes, 2);
return ecc_bytes;
}
NAND_ECC_CAPS_SINGLE(meson_gxl_ecc_caps,
meson_nand_calc_ecc_bytes, 1024, 8, 24, 30, 40, 50, 60);
static const int axg_stepinfo_strengths[] = { 8 };
static const struct nand_ecc_step_info axg_stepinfo[] = {
{
.stepsize = 1024,
.strengths = axg_stepinfo_strengths,
.nstrengths = ARRAY_SIZE(axg_stepinfo_strengths)
},
{
.stepsize = 512,
.strengths = axg_stepinfo_strengths,
.nstrengths = ARRAY_SIZE(axg_stepinfo_strengths)
},
};
static const struct nand_ecc_caps meson_axg_ecc_caps = {
.stepinfos = axg_stepinfo,
.nstepinfos = ARRAY_SIZE(axg_stepinfo),
.calc_ecc_bytes = meson_nand_calc_ecc_bytes,
};
static struct meson_nfc_nand_chip *to_meson_nand(struct nand_chip *nand)
{
return container_of(nand, struct meson_nfc_nand_chip, nand);
}
static void meson_nfc_select_chip(struct nand_chip *nand, int chip)
{
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
struct meson_nfc *nfc = nand_get_controller_data(nand);
int ret, value;
if (chip < 0 || WARN_ON_ONCE(chip >= meson_chip->nsels))
return;
nfc->param.chip_select = meson_chip->sels[chip] ? NAND_CE1 : NAND_CE0;
nfc->param.rb_select = nfc->param.chip_select;
nfc->timing.twb = meson_chip->twb;
nfc->timing.tadl = meson_chip->tadl;
nfc->timing.tbers_max = meson_chip->tbers_max;
if (nfc->clk_rate != meson_chip->clk_rate) {
ret = clk_set_rate(nfc->nand_clk, meson_chip->clk_rate);
if (ret) {
dev_err(nfc->dev, "failed to set clock rate\n");
return;
}
nfc->clk_rate = meson_chip->clk_rate;
}
if (nfc->bus_timing != meson_chip->bus_timing) {
value = (NFC_CLK_CYCLE - 1) | (meson_chip->bus_timing << 5);
writel(value, nfc->reg_base + NFC_REG_CFG);
writel((1 << 31), nfc->reg_base + NFC_REG_CMD);
nfc->bus_timing = meson_chip->bus_timing;
}
}
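/*
* Controller commands are single 32-bit words pushed into NFC_REG_CMD.
* An IDLE command keeps the currently selected CE asserted for up to
* 0x3ff extra cycles, which is also used below to enforce tWB delays.
*/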
static void meson_nfc_cmd_idle(struct meson_nfc *nfc, u32 time)
{
writel(nfc->param.chip_select | NFC_CMD_IDLE | (time & 0x3ff),
nfc->reg_base + NFC_REG_CMD);
}
static void meson_nfc_cmd_seed(struct meson_nfc *nfc, u32 seed)
{
writel(NFC_CMD_SEED | (0xc2 + (seed & 0x7fff)),
nfc->reg_base + NFC_REG_CMD);
}
static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
int scrambler)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct meson_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
u32 bch = meson_chip->bch_mode, cmd;
int len = mtd->writesize, pagesize, pages;
pagesize = nand->ecc.size;
if (raw) {
len = mtd->writesize + mtd->oobsize;
cmd = len | scrambler | DMA_DIR(dir);
writel(cmd, nfc->reg_base + NFC_REG_CMD);
return;
}
pages = len / nand->ecc.size;
cmd = CMDRWGEN(DMA_DIR(dir), scrambler, bch,
NFC_CMD_SHORTMODE_DISABLE, pagesize, pages);
writel(cmd, nfc->reg_base + NFC_REG_CMD);
}
static void meson_nfc_drain_cmd(struct meson_nfc *nfc)
{
/*
* Insert two commands to make sure all valid commands are finished.
*
* The NAND flash controller is designed as a two-stage pipeline -
* a) fetch and b) execute.
* There might be cases when the driver sees an empty command queue
* while the NAND flash controller still has two commands buffered:
* one fetched into the NFC request queue (ready to run), and another
* actively executing. So pushing 2 "IDLE" commands guarantees that
* the pipeline is emptied.
*/
meson_nfc_cmd_idle(nfc, 0);
meson_nfc_cmd_idle(nfc, 0);
}
static int meson_nfc_wait_cmd_finish(struct meson_nfc *nfc,
unsigned int timeout_ms)
{
u32 cmd_size = 0;
int ret;
/* wait until the command FIFO is empty */
ret = readl_relaxed_poll_timeout(nfc->reg_base + NFC_REG_CMD, cmd_size,
!NFC_CMD_GET_SIZE(cmd_size),
10, timeout_ms * 1000);
if (ret)
dev_err(nfc->dev, "wait for empty CMD FIFO time out\n");
return ret;
}
static int meson_nfc_wait_dma_finish(struct meson_nfc *nfc)
{
meson_nfc_drain_cmd(nfc);
return meson_nfc_wait_cmd_finish(nfc, DMA_BUSY_TIMEOUT);
}
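/*
 * Sketch of the per-step DMA buffer layout, as implied by the pointer
 * math below: each ECC step carries ecc.size data bytes followed by
 * 2 user bytes plus ecc.bytes of parity, i.e.
 *   | data[0] | user+parity[0] | data[1] | user+parity[1] | ...
 * meson_nfc_data_ptr() returns the start of step i's data area and
 * meson_nfc_oob_ptr() the start of its user bytes.
 */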
static u8 *meson_nfc_oob_ptr(struct nand_chip *nand, int i)
{
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
int len;
len = nand->ecc.size * (i + 1) + (nand->ecc.bytes + 2) * i;
return meson_chip->data_buf + len;
}
static u8 *meson_nfc_data_ptr(struct nand_chip *nand, int i)
{
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
int len, temp;
temp = nand->ecc.size + nand->ecc.bytes;
len = (temp + 2) * i;
return meson_chip->data_buf + len;
}
static void meson_nfc_get_data_oob(struct nand_chip *nand,
u8 *buf, u8 *oobbuf)
{
int i, oob_len = 0;
u8 *dsrc, *osrc;
oob_len = nand->ecc.bytes + 2;
for (i = 0; i < nand->ecc.steps; i++) {
if (buf) {
dsrc = meson_nfc_data_ptr(nand, i);
memcpy(buf, dsrc, nand->ecc.size);
buf += nand->ecc.size;
}
osrc = meson_nfc_oob_ptr(nand, i);
memcpy(oobbuf, osrc, oob_len);
oobbuf += oob_len;
}
}
static void meson_nfc_set_data_oob(struct nand_chip *nand,
const u8 *buf, u8 *oobbuf)
{
int i, oob_len = 0;
u8 *dsrc, *osrc;
oob_len = nand->ecc.bytes + 2;
for (i = 0; i < nand->ecc.steps; i++) {
if (buf) {
dsrc = meson_nfc_data_ptr(nand, i);
memcpy(dsrc, buf, nand->ecc.size);
buf += nand->ecc.size;
}
osrc = meson_nfc_oob_ptr(nand, i);
memcpy(osrc, oobbuf, oob_len);
oobbuf += oob_len;
}
}
static int meson_nfc_wait_no_rb_pin(struct nand_chip *nand, int timeout_ms,
bool need_cmd_read0)
{
struct meson_nfc *nfc = nand_get_controller_data(nand);
u32 cmd, cfg;
meson_nfc_cmd_idle(nfc, nfc->timing.twb);
meson_nfc_drain_cmd(nfc);
meson_nfc_wait_cmd_finish(nfc, CMD_FIFO_EMPTY_TIMEOUT);
cfg = readl(nfc->reg_base + NFC_REG_CFG);
cfg |= NFC_RB_IRQ_EN;
writel(cfg, nfc->reg_base + NFC_REG_CFG);
reinit_completion(&nfc->completion);
nand_status_op(nand, NULL);
/* use the max erase time as the maximum number of clocks to wait for R/B */
cmd = NFC_CMD_RB | NFC_CMD_RB_INT_NO_PIN | nfc->timing.tbers_max;
writel(cmd, nfc->reg_base + NFC_REG_CMD);
if (!wait_for_completion_timeout(&nfc->completion,
msecs_to_jiffies(timeout_ms)))
return -ETIMEDOUT;
if (need_cmd_read0)
nand_exit_status_op(nand);
return 0;
}
static int meson_nfc_wait_rb_pin(struct meson_nfc *nfc, int timeout_ms)
{
u32 cmd, cfg;
int ret = 0;
meson_nfc_cmd_idle(nfc, nfc->timing.twb);
meson_nfc_drain_cmd(nfc);
meson_nfc_wait_cmd_finish(nfc, CMD_FIFO_EMPTY_TIMEOUT);
cfg = readl(nfc->reg_base + NFC_REG_CFG);
cfg |= NFC_RB_IRQ_EN;
writel(cfg, nfc->reg_base + NFC_REG_CFG);
reinit_completion(&nfc->completion);
/* use the max erase time as the maximum number of clocks to wait for R/B */
cmd = NFC_CMD_RB | NFC_CMD_RB_INT
| nfc->param.chip_select | nfc->timing.tbers_max;
writel(cmd, nfc->reg_base + NFC_REG_CMD);
ret = wait_for_completion_timeout(&nfc->completion,
msecs_to_jiffies(timeout_ms));
if (ret == 0)
ret = -1;
return ret;
}
static int meson_nfc_queue_rb(struct nand_chip *nand, int timeout_ms,
bool need_cmd_read0)
{
struct meson_nfc *nfc = nand_get_controller_data(nand);
if (nfc->no_rb_pin) {
/*
 * This mode is used when there is no wired R/B pin.
 * It works like nand_soft_waitrdy(), but instead of
 * polling the NAND_CMD_STATUS bit in a software loop,
 * it waits for an interrupt: the controller watches the
 * IO bus and raises an interrupt when it detects
 * NAND_CMD_STATUS on it. After the interrupt,
 * NAND_CMD_READ0 is sent to terminate the ready-wait
 * procedure if needed (in all cases except page
 * programming - hence the 'need_cmd_read0' flag).
 */
return meson_nfc_wait_no_rb_pin(nand, timeout_ms,
need_cmd_read0);
} else {
return meson_nfc_wait_rb_pin(nfc, timeout_ms);
}
}
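/*
 * Each ECC step owns one 8-byte info word in info_buf; the two free
 * (user) OOB bytes of that step live in its low 16 bits, as the
 * shifts below show. The remaining bits are filled in by the
 * controller (ECC error counts, completion flag, ...).
 */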
static void meson_nfc_set_user_byte(struct nand_chip *nand, u8 *oob_buf)
{
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
__le64 *info;
int i, count;
for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) {
info = &meson_chip->info_buf[i];
*info |= oob_buf[count];
*info |= oob_buf[count + 1] << 8;
}
}
static void meson_nfc_get_user_byte(struct nand_chip *nand, u8 *oob_buf)
{
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
__le64 *info;
int i, count;
for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) {
info = &meson_chip->info_buf[i];
oob_buf[count] = *info;
oob_buf[count + 1] = *info >> 8;
}
}
static int meson_nfc_ecc_correct(struct nand_chip *nand, u32 *bitflips,
u64 *correct_bitmap)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
__le64 *info;
int ret = 0, i;
for (i = 0; i < nand->ecc.steps; i++) {
info = &meson_chip->info_buf[i];
if (ECC_ERR_CNT(*info) != ECC_UNCORRECTABLE) {
mtd->ecc_stats.corrected += ECC_ERR_CNT(*info);
*bitflips = max_t(u32, *bitflips, ECC_ERR_CNT(*info));
*correct_bitmap |= BIT_ULL(i);
continue;
}
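/*
 * Uncorrectable step on a scrambled chip: if fewer bits read back
 * as zero than the ECC strength, the page is almost certainly
 * erased rather than corrupt, so report it as blank (0xff) instead
 * of failing with -EBADMSG.
 */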
if ((nand->options & NAND_NEED_SCRAMBLING) &&
ECC_ZERO_CNT(*info) < nand->ecc.strength) {
mtd->ecc_stats.corrected += ECC_ZERO_CNT(*info);
*bitflips = max_t(u32, *bitflips,
ECC_ZERO_CNT(*info));
ret = ECC_CHECK_RETURN_FF;
} else {
ret = -EBADMSG;
}
}
return ret;
}
static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf,
int datalen, void *infobuf, int infolen,
enum dma_data_direction dir)
{
struct meson_nfc *nfc = nand_get_controller_data(nand);
u32 cmd;
int ret = 0;
nfc->daddr = dma_map_single(nfc->dev, databuf, datalen, dir);
ret = dma_mapping_error(nfc->dev, nfc->daddr);
if (ret) {
dev_err(nfc->dev, "DMA mapping error\n");
return ret;
}
cmd = GENCMDDADDRL(NFC_CMD_ADL, nfc->daddr);
writel(cmd, nfc->reg_base + NFC_REG_CMD);
cmd = GENCMDDADDRH(NFC_CMD_ADH, nfc->daddr);
writel(cmd, nfc->reg_base + NFC_REG_CMD);
if (infobuf) {
nfc->iaddr = dma_map_single(nfc->dev, infobuf, infolen, dir);
ret = dma_mapping_error(nfc->dev, nfc->iaddr);
if (ret) {
dev_err(nfc->dev, "DMA mapping error\n");
dma_unmap_single(nfc->dev,
nfc->daddr, datalen, dir);
return ret;
}
nfc->info_bytes = infolen;
cmd = GENCMDIADDRL(NFC_CMD_AIL, nfc->iaddr);
writel(cmd, nfc->reg_base + NFC_REG_CMD);
cmd = GENCMDIADDRH(NFC_CMD_AIH, nfc->iaddr);
writel(cmd, nfc->reg_base + NFC_REG_CMD);
}
return ret;
}
static void meson_nfc_dma_buffer_release(struct nand_chip *nand,
int datalen, int infolen,
enum dma_data_direction dir)
{
struct meson_nfc *nfc = nand_get_controller_data(nand);
dma_unmap_single(nfc->dev, nfc->daddr, datalen, dir);
if (infolen) {
dma_unmap_single(nfc->dev, nfc->iaddr, infolen, dir);
nfc->info_bytes = 0;
}
}
static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
{
struct meson_nfc *nfc = nand_get_controller_data(nand);
int ret = 0;
u32 cmd;
u8 *info;
info = kzalloc(PER_INFO_BYTE, GFP_KERNEL);
if (!info)
return -ENOMEM;
ret = meson_nfc_dma_buffer_setup(nand, buf, len, info,
PER_INFO_BYTE, DMA_FROM_DEVICE);
if (ret)
goto out;
cmd = NFC_CMD_N2M | len;
writel(cmd, nfc->reg_base + NFC_REG_CMD);
meson_nfc_drain_cmd(nfc);
meson_nfc_wait_cmd_finish(nfc, 1000);
meson_nfc_dma_buffer_release(nand, len, PER_INFO_BYTE, DMA_FROM_DEVICE);
out:
kfree(info);
return ret;
}
static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len)
{
struct meson_nfc *nfc = nand_get_controller_data(nand);
int ret = 0;
u32 cmd;
ret = meson_nfc_dma_buffer_setup(nand, buf, len, NULL,
0, DMA_TO_DEVICE);
if (ret)
return ret;
cmd = NFC_CMD_M2N | len;
writel(cmd, nfc->reg_base + NFC_REG_CMD);
meson_nfc_drain_cmd(nfc);
meson_nfc_wait_cmd_finish(nfc, 1000);
meson_nfc_dma_buffer_release(nand, len, 0, DMA_TO_DEVICE);
return ret;
}
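/*
 * Sketch of the command FIFO sequence built below, mirroring
 * struct nand_rw_cmd: one CLE cycle for cmd0 (READ0 or SEQIN), up
 * to two column and three row ALE cycles, and - for reads only - a
 * trailing CLE cycle for cmd1 (READSTART).
 */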
static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand,
int page, bool in)
{
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(nand_get_interface_config(nand));
struct mtd_info *mtd = nand_to_mtd(nand);
struct meson_nfc *nfc = nand_get_controller_data(nand);
u32 *addrs = nfc->cmdfifo.rw.addrs;
u32 cs = nfc->param.chip_select;
u32 cmd0, cmd_num, row_start;
int i;
cmd_num = sizeof(struct nand_rw_cmd) / sizeof(int);
cmd0 = in ? NAND_CMD_READ0 : NAND_CMD_SEQIN;
nfc->cmdfifo.rw.cmd0 = cs | NFC_CMD_CLE | cmd0;
addrs[0] = cs | NFC_CMD_ALE | NFC_COLUMN_ADDR_0;
if (mtd->writesize <= 512) {
cmd_num--;
row_start = 1;
} else {
addrs[1] = cs | NFC_CMD_ALE | NFC_COLUMN_ADDR_1;
row_start = 2;
}
addrs[row_start] = cs | NFC_CMD_ALE | ROW_ADDER(page, 0);
addrs[row_start + 1] = cs | NFC_CMD_ALE | ROW_ADDER(page, 1);
if (nand->options & NAND_ROW_ADDR_3)
addrs[row_start + 2] =
cs | NFC_CMD_ALE | ROW_ADDER(page, 2);
else
cmd_num--;
/* subtract cmd1 */
cmd_num--;
for (i = 0; i < cmd_num; i++)
writel_relaxed(nfc->cmdfifo.cmd[i],
nfc->reg_base + NFC_REG_CMD);
if (in) {
nfc->cmdfifo.rw.cmd1 = cs | NFC_CMD_CLE | NAND_CMD_READSTART;
writel(nfc->cmdfifo.rw.cmd1, nfc->reg_base + NFC_REG_CMD);
meson_nfc_queue_rb(nand, PSEC_TO_MSEC(sdr->tR_max), true);
} else {
meson_nfc_cmd_idle(nfc, nfc->timing.tadl);
}
return 0;
}
static int meson_nfc_write_page_sub(struct nand_chip *nand,
int page, int raw)
{
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(nand_get_interface_config(nand));
struct mtd_info *mtd = nand_to_mtd(nand);
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
struct meson_nfc *nfc = nand_get_controller_data(nand);
int data_len, info_len;
u32 cmd;
int ret;
meson_nfc_select_chip(nand, nand->cur_cs);
data_len = mtd->writesize + mtd->oobsize;
info_len = nand->ecc.steps * PER_INFO_BYTE;
ret = meson_nfc_rw_cmd_prepare_and_execute(nand, page, DIRWRITE);
if (ret)
return ret;
ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf,
data_len, meson_chip->info_buf,
info_len, DMA_TO_DEVICE);
if (ret)
return ret;
if (nand->options & NAND_NEED_SCRAMBLING) {
meson_nfc_cmd_seed(nfc, page);
meson_nfc_cmd_access(nand, raw, DIRWRITE,
NFC_CMD_SCRAMBLER_ENABLE);
} else {
meson_nfc_cmd_access(nand, raw, DIRWRITE,
NFC_CMD_SCRAMBLER_DISABLE);
}
cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_PAGEPROG;
writel(cmd, nfc->reg_base + NFC_REG_CMD);
meson_nfc_queue_rb(nand, PSEC_TO_MSEC(sdr->tPROG_max), false);
meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_TO_DEVICE);
return ret;
}
static int meson_nfc_write_page_raw(struct nand_chip *nand, const u8 *buf,
int oob_required, int page)
{
u8 *oob_buf = nand->oob_poi;
meson_nfc_set_data_oob(nand, buf, oob_buf);
return meson_nfc_write_page_sub(nand, page, 1);
}
static int meson_nfc_write_page_hwecc(struct nand_chip *nand,
const u8 *buf, int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
u8 *oob_buf = nand->oob_poi;
memcpy(meson_chip->data_buf, buf, mtd->writesize);
memset(meson_chip->info_buf, 0, nand->ecc.steps * PER_INFO_BYTE);
meson_nfc_set_user_byte(nand, oob_buf);
return meson_nfc_write_page_sub(nand, page, 0);
}
static void meson_nfc_check_ecc_pages_valid(struct meson_nfc *nfc,
struct nand_chip *nand, int raw)
{
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
__le64 *info;
u32 neccpages;
int ret;
neccpages = raw ? 1 : nand->ecc.steps;
info = &meson_chip->info_buf[neccpages - 1];
do {
usleep_range(10, 15);
/* info is updated by the NFC DMA engine */
smp_rmb();
dma_sync_single_for_cpu(nfc->dev, nfc->iaddr, nfc->info_bytes,
DMA_FROM_DEVICE);
ret = *info & ECC_COMPLETE;
} while (!ret);
}
static int meson_nfc_read_page_sub(struct nand_chip *nand,
int page, int raw)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct meson_nfc *nfc = nand_get_controller_data(nand);
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
int data_len, info_len;
int ret;
meson_nfc_select_chip(nand, nand->cur_cs);
data_len = mtd->writesize + mtd->oobsize;
info_len = nand->ecc.steps * PER_INFO_BYTE;
ret = meson_nfc_rw_cmd_prepare_and_execute(nand, page, DIRREAD);
if (ret)
return ret;
ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf,
data_len, meson_chip->info_buf,
info_len, DMA_FROM_DEVICE);
if (ret)
return ret;
if (nand->options & NAND_NEED_SCRAMBLING) {
meson_nfc_cmd_seed(nfc, page);
meson_nfc_cmd_access(nand, raw, DIRREAD,
NFC_CMD_SCRAMBLER_ENABLE);
} else {
meson_nfc_cmd_access(nand, raw, DIRREAD,
NFC_CMD_SCRAMBLER_DISABLE);
}
ret = meson_nfc_wait_dma_finish(nfc);
meson_nfc_check_ecc_pages_valid(nfc, nand, raw);
meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_FROM_DEVICE);
return ret;
}
static int meson_nfc_read_page_raw(struct nand_chip *nand, u8 *buf,
int oob_required, int page)
{
u8 *oob_buf = nand->oob_poi;
int ret;
ret = meson_nfc_read_page_sub(nand, page, 1);
if (ret)
return ret;
meson_nfc_get_data_oob(nand, buf, oob_buf);
return 0;
}
static int meson_nfc_read_page_hwecc(struct nand_chip *nand, u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
struct nand_ecc_ctrl *ecc = &nand->ecc;
u64 correct_bitmap = 0;
u32 bitflips = 0;
u8 *oob_buf = nand->oob_poi;
int ret, i;
ret = meson_nfc_read_page_sub(nand, page, 0);
if (ret)
return ret;
meson_nfc_get_user_byte(nand, oob_buf);
ret = meson_nfc_ecc_correct(nand, &bitflips, &correct_bitmap);
if (ret == ECC_CHECK_RETURN_FF) {
if (buf)
memset(buf, 0xff, mtd->writesize);
memset(oob_buf, 0xff, mtd->oobsize);
} else if (ret < 0) {
if ((nand->options & NAND_NEED_SCRAMBLING) || !buf) {
mtd->ecc_stats.failed++;
return bitflips;
}
ret = meson_nfc_read_page_raw(nand, buf, 0, page);
if (ret)
return ret;
for (i = 0; i < nand->ecc.steps ; i++) {
u8 *data = buf + i * ecc->size;
u8 *oob = nand->oob_poi + i * (ecc->bytes + 2);
if (correct_bitmap & BIT_ULL(i))
continue;
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 2,
NULL, 0,
ecc->strength);
if (ret < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += ret;
bitflips = max_t(u32, bitflips, ret);
}
}
} else if (buf && buf != meson_chip->data_buf) {
memcpy(buf, meson_chip->data_buf, mtd->writesize);
}
return bitflips;
}
static int meson_nfc_read_oob_raw(struct nand_chip *nand, int page)
{
return meson_nfc_read_page_raw(nand, NULL, 1, page);
}
static int meson_nfc_read_oob(struct nand_chip *nand, int page)
{
return meson_nfc_read_page_hwecc(nand, NULL, 1, page);
}
static bool meson_nfc_is_buffer_dma_safe(const void *buffer)
{
if ((uintptr_t)buffer % DMA_ADDR_ALIGN)
return false;
if (virt_addr_valid(buffer) && (!object_is_on_stack(buffer)))
return true;
return false;
}
static void *
meson_nand_op_get_dma_safe_input_buf(const struct nand_op_instr *instr)
{
if (WARN_ON(instr->type != NAND_OP_DATA_IN_INSTR))
return NULL;
if (meson_nfc_is_buffer_dma_safe(instr->ctx.data.buf.in))
return instr->ctx.data.buf.in;
return kzalloc(instr->ctx.data.len, GFP_KERNEL);
}
static void
meson_nand_op_put_dma_safe_input_buf(const struct nand_op_instr *instr,
void *buf)
{
if (WARN_ON(instr->type != NAND_OP_DATA_IN_INSTR) ||
WARN_ON(!buf))
return;
if (buf == instr->ctx.data.buf.in)
return;
memcpy(instr->ctx.data.buf.in, buf, instr->ctx.data.len);
kfree(buf);
}
static void *
meson_nand_op_get_dma_safe_output_buf(const struct nand_op_instr *instr)
{
if (WARN_ON(instr->type != NAND_OP_DATA_OUT_INSTR))
return NULL;
if (meson_nfc_is_buffer_dma_safe(instr->ctx.data.buf.out))
return (void *)instr->ctx.data.buf.out;
return kmemdup(instr->ctx.data.buf.out,
instr->ctx.data.len, GFP_KERNEL);
}
static void
meson_nand_op_put_dma_safe_output_buf(const struct nand_op_instr *instr,
const void *buf)
{
if (WARN_ON(instr->type != NAND_OP_DATA_OUT_INSTR) ||
WARN_ON(!buf))
return;
if (buf != instr->ctx.data.buf.out)
kfree(buf);
}
static int meson_nfc_check_op(struct nand_chip *chip,
const struct nand_operation *op)
{
int op_id;
for (op_id = 0; op_id < op->ninstrs; op_id++) {
const struct nand_op_instr *instr;
instr = &op->instrs[op_id];
switch (instr->type) {
case NAND_OP_DATA_IN_INSTR:
case NAND_OP_DATA_OUT_INSTR:
if (instr->ctx.data.len > NFC_CMD_RAW_LEN)
return -ENOTSUPP;
break;
default:
break;
}
}
return 0;
}
static int meson_nfc_exec_op(struct nand_chip *nand,
const struct nand_operation *op, bool check_only)
{
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
struct meson_nfc *nfc = nand_get_controller_data(nand);
const struct nand_op_instr *instr = NULL;
void *buf;
u32 op_id, delay_idle, cmd;
int err;
int i;
err = meson_nfc_check_op(nand, op);
if (err)
return err;
if (check_only)
return 0;
meson_nfc_select_chip(nand, op->cs);
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];
delay_idle = DIV_ROUND_UP(PSEC_TO_NSEC(instr->delay_ns),
meson_chip->level1_divider *
NFC_CLK_CYCLE);
switch (instr->type) {
case NAND_OP_CMD_INSTR:
cmd = nfc->param.chip_select | NFC_CMD_CLE;
cmd |= instr->ctx.cmd.opcode & 0xff;
writel(cmd, nfc->reg_base + NFC_REG_CMD);
meson_nfc_cmd_idle(nfc, delay_idle);
break;
case NAND_OP_ADDR_INSTR:
for (i = 0; i < instr->ctx.addr.naddrs; i++) {
cmd = nfc->param.chip_select | NFC_CMD_ALE;
cmd |= instr->ctx.addr.addrs[i] & 0xff;
writel(cmd, nfc->reg_base + NFC_REG_CMD);
}
meson_nfc_cmd_idle(nfc, delay_idle);
break;
case NAND_OP_DATA_IN_INSTR:
buf = meson_nand_op_get_dma_safe_input_buf(instr);
if (!buf)
return -ENOMEM;
meson_nfc_read_buf(nand, buf, instr->ctx.data.len);
meson_nand_op_put_dma_safe_input_buf(instr, buf);
break;
case NAND_OP_DATA_OUT_INSTR:
buf = meson_nand_op_get_dma_safe_output_buf(instr);
if (!buf)
return -ENOMEM;
meson_nfc_write_buf(nand, buf, instr->ctx.data.len);
meson_nand_op_put_dma_safe_output_buf(instr, buf);
break;
case NAND_OP_WAITRDY_INSTR:
meson_nfc_queue_rb(nand, instr->ctx.waitrdy.timeout_ms,
true);
if (instr->delay_ns)
meson_nfc_cmd_idle(nfc, delay_idle);
break;
}
}
meson_nfc_wait_cmd_finish(nfc, 1000);
return 0;
}
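/*
 * OOB layout: each ECC section holds 2 free (user) bytes followed by
 * ecc.bytes of parity. Worked example for BCH8/1K on a 2KiB page
 * (2 steps, 14 parity bytes each): free bytes at offsets 0-1 and
 * 16-17, ECC bytes at offsets 2-15 and 18-31.
 */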
static int meson_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *nand = mtd_to_nand(mtd);
if (section >= nand->ecc.steps)
return -ERANGE;
oobregion->offset = 2 + (section * (2 + nand->ecc.bytes));
oobregion->length = nand->ecc.bytes;
return 0;
}
static int meson_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
struct nand_chip *nand = mtd_to_nand(mtd);
if (section >= nand->ecc.steps)
return -ERANGE;
oobregion->offset = section * (2 + nand->ecc.bytes);
oobregion->length = 2;
return 0;
}
static const struct mtd_ooblayout_ops meson_ooblayout_ops = {
.ecc = meson_ooblayout_ecc,
.free = meson_ooblayout_free,
};
static int meson_nfc_clk_init(struct meson_nfc *nfc)
{
struct clk_parent_data nfc_divider_parent_data[1] = {0};
struct clk_init_data init = {0};
int ret;
/* request core clock */
nfc->core_clk = devm_clk_get(nfc->dev, "core");
if (IS_ERR(nfc->core_clk)) {
dev_err(nfc->dev, "failed to get core clock\n");
return PTR_ERR(nfc->core_clk);
}
nfc->device_clk = devm_clk_get(nfc->dev, "device");
if (IS_ERR(nfc->device_clk)) {
dev_err(nfc->dev, "failed to get device clock\n");
return PTR_ERR(nfc->device_clk);
}
init.name = devm_kasprintf(nfc->dev,
GFP_KERNEL, "%s#div",
dev_name(nfc->dev));
init.ops = &clk_divider_ops;
nfc_divider_parent_data[0].fw_name = "device";
init.parent_data = nfc_divider_parent_data;
init.num_parents = 1;
nfc->nand_divider.reg = nfc->reg_clk;
nfc->nand_divider.shift = CLK_DIV_SHIFT;
nfc->nand_divider.width = CLK_DIV_WIDTH;
nfc->nand_divider.hw.init = &init;
nfc->nand_divider.flags = CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ROUND_CLOSEST |
CLK_DIVIDER_ALLOW_ZERO;
nfc->nand_clk = devm_clk_register(nfc->dev, &nfc->nand_divider.hw);
if (IS_ERR(nfc->nand_clk))
return PTR_ERR(nfc->nand_clk);
/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
writel(CLK_SELECT_NAND | readl(nfc->reg_clk),
nfc->reg_clk);
ret = clk_prepare_enable(nfc->core_clk);
if (ret) {
dev_err(nfc->dev, "failed to enable core clock\n");
return ret;
}
ret = clk_prepare_enable(nfc->device_clk);
if (ret) {
dev_err(nfc->dev, "failed to enable device clock\n");
goto err_device_clk;
}
ret = clk_prepare_enable(nfc->nand_clk);
if (ret) {
dev_err(nfc->dev, "pre enable NFC divider fail\n");
goto err_nand_clk;
}
ret = clk_set_rate(nfc->nand_clk, 24000000);
if (ret)
goto err_disable_clk;
return 0;
err_disable_clk:
clk_disable_unprepare(nfc->nand_clk);
err_nand_clk:
clk_disable_unprepare(nfc->device_clk);
err_device_clk:
clk_disable_unprepare(nfc->core_clk);
return ret;
}
static void meson_nfc_disable_clk(struct meson_nfc *nfc)
{
clk_disable_unprepare(nfc->nand_clk);
clk_disable_unprepare(nfc->device_clk);
clk_disable_unprepare(nfc->core_clk);
}
static void meson_nfc_free_buffer(struct nand_chip *nand)
{
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
kfree(meson_chip->info_buf);
kfree(meson_chip->data_buf);
}
static int meson_chip_buffer_init(struct nand_chip *nand)
{
struct mtd_info *mtd = nand_to_mtd(nand);
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
u32 page_bytes, info_bytes, nsectors;
nsectors = mtd->writesize / nand->ecc.size;
page_bytes = mtd->writesize + mtd->oobsize;
info_bytes = nsectors * PER_INFO_BYTE;
meson_chip->data_buf = kmalloc(page_bytes, GFP_KERNEL);
if (!meson_chip->data_buf)
return -ENOMEM;
meson_chip->info_buf = kmalloc(info_bytes, GFP_KERNEL);
if (!meson_chip->info_buf) {
kfree(meson_chip->data_buf);
return -ENOMEM;
}
return 0;
}
static
int meson_nfc_setup_interface(struct nand_chip *nand, int csline,
const struct nand_interface_config *conf)
{
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
const struct nand_sdr_timings *timings;
u32 div, bt_min, bt_max, tbers_clocks;
timings = nand_get_sdr_timings(conf);
if (IS_ERR(timings))
return -ENOTSUPP;
if (csline == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
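/*
 * The SDR timings below are in picoseconds; tRC_min / 1000 is the
 * NAND cycle time in nanoseconds, from which the level-1 clock
 * divider is derived (treating NFC_CLK_CYCLE as the controller
 * cycle time in ns is an assumption drawn from this math).
 */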
div = DIV_ROUND_UP((timings->tRC_min / 1000), NFC_CLK_CYCLE);
bt_min = (timings->tREA_max + NFC_DEFAULT_DELAY) / div;
bt_max = (NFC_DEFAULT_DELAY + timings->tRHOH_min +
timings->tRC_min / 2) / div;
meson_chip->twb = DIV_ROUND_UP(PSEC_TO_NSEC(timings->tWB_max),
div * NFC_CLK_CYCLE);
meson_chip->tadl = DIV_ROUND_UP(PSEC_TO_NSEC(timings->tADL_min),
div * NFC_CLK_CYCLE);
tbers_clocks = DIV_ROUND_UP_ULL(PSEC_TO_NSEC(timings->tBERS_max),
div * NFC_CLK_CYCLE);
meson_chip->tbers_max = ilog2(tbers_clocks);
if (!is_power_of_2(tbers_clocks))
meson_chip->tbers_max++;
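/*
 * tbers_max now holds ceil(log2(tbers_clocks)): the erase timeout
 * rounded up to a power-of-two clock count, as consumed by the
 * NFC_CMD_RB wait command (our reading of the code above).
 */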
bt_min = DIV_ROUND_UP(bt_min, 1000);
bt_max = DIV_ROUND_UP(bt_max, 1000);
if (bt_max < bt_min)
return -EINVAL;
meson_chip->level1_divider = div;
meson_chip->clk_rate = 1000000000 / meson_chip->level1_divider;
meson_chip->bus_timing = (bt_min + bt_max) / 2 + 1;
return 0;
}
static int meson_nand_bch_mode(struct nand_chip *nand)
{
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
int i;
if (nand->ecc.strength > 60 || nand->ecc.strength < 8)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(meson_ecc); i++) {
if (meson_ecc[i].strength == nand->ecc.strength &&
meson_ecc[i].size == nand->ecc.size) {
meson_chip->bch_mode = meson_ecc[i].bch;
return 0;
}
}
return -EINVAL;
}
static void meson_nand_detach_chip(struct nand_chip *nand)
{
meson_nfc_free_buffer(nand);
}
static int meson_nand_attach_chip(struct nand_chip *nand)
{
struct meson_nfc *nfc = nand_get_controller_data(nand);
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
struct mtd_info *mtd = nand_to_mtd(nand);
int raw_writesize;
int ret;
if (!mtd->name) {
mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
"%s:nand%d",
dev_name(nfc->dev),
meson_chip->sels[0]);
if (!mtd->name)
return -ENOMEM;
}
raw_writesize = mtd->writesize + mtd->oobsize;
if (raw_writesize > NFC_CMD_RAW_LEN) {
dev_err(nfc->dev, "too big write size in raw mode: %d > %ld\n",
raw_writesize, NFC_CMD_RAW_LEN);
return -EINVAL;
}
if (nand->bbt_options & NAND_BBT_USE_FLASH)
nand->bbt_options |= NAND_BBT_NO_OOB;
nand->options |= NAND_NO_SUBPAGE_WRITE;
ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps,
mtd->oobsize - 2);
if (ret) {
dev_err(nfc->dev, "failed to ECC init\n");
return -EINVAL;
}
mtd_set_ooblayout(mtd, &meson_ooblayout_ops);
ret = meson_nand_bch_mode(nand);
if (ret)
return -EINVAL;
nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
nand->ecc.write_page_raw = meson_nfc_write_page_raw;
nand->ecc.write_page = meson_nfc_write_page_hwecc;
nand->ecc.write_oob_raw = nand_write_oob_std;
nand->ecc.write_oob = nand_write_oob_std;
nand->ecc.read_page_raw = meson_nfc_read_page_raw;
nand->ecc.read_page = meson_nfc_read_page_hwecc;
nand->ecc.read_oob_raw = meson_nfc_read_oob_raw;
nand->ecc.read_oob = meson_nfc_read_oob;
if (nand->options & NAND_BUSWIDTH_16) {
dev_err(nfc->dev, "16bits bus width not supported");
return -EINVAL;
}
ret = meson_chip_buffer_init(nand);
if (ret)
return -ENOMEM;
return ret;
}
static const struct nand_controller_ops meson_nand_controller_ops = {
.attach_chip = meson_nand_attach_chip,
.detach_chip = meson_nand_detach_chip,
.setup_interface = meson_nfc_setup_interface,
.exec_op = meson_nfc_exec_op,
};
static int
meson_nfc_nand_chip_init(struct device *dev,
struct meson_nfc *nfc, struct device_node *np)
{
struct meson_nfc_nand_chip *meson_chip;
struct nand_chip *nand;
struct mtd_info *mtd;
int ret, i;
u32 tmp, nsels;
u32 nand_rb_val = 0;
nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
if (!nsels || nsels > MAX_CE_NUM) {
dev_err(dev, "invalid register property size\n");
return -EINVAL;
}
meson_chip = devm_kzalloc(dev, struct_size(meson_chip, sels, nsels),
GFP_KERNEL);
if (!meson_chip)
return -ENOMEM;
meson_chip->nsels = nsels;
for (i = 0; i < nsels; i++) {
ret = of_property_read_u32_index(np, "reg", i, &tmp);
if (ret) {
dev_err(dev, "could not retrieve register property: %d\n",
ret);
return ret;
}
if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
dev_err(dev, "CS %d already assigned\n", tmp);
return -EINVAL;
}
}
nand = &meson_chip->nand;
nand->controller = &nfc->controller;
nand->controller->ops = &meson_nand_controller_ops;
nand_set_flash_node(nand, np);
nand_set_controller_data(nand, nfc);
nand->options |= NAND_USES_DMA;
mtd = nand_to_mtd(nand);
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
ret = of_property_read_u32(np, "nand-rb", &nand_rb_val);
if (ret == -EINVAL)
nfc->no_rb_pin = true;
else if (ret)
return ret;
if (nand_rb_val)
return -EINVAL;
ret = nand_scan(nand, nsels);
if (ret)
return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "failed to register MTD device: %d\n", ret);
nand_cleanup(nand);
return ret;
}
list_add_tail(&meson_chip->node, &nfc->chips);
return 0;
}
static void meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc)
{
struct meson_nfc_nand_chip *meson_chip;
struct mtd_info *mtd;
while (!list_empty(&nfc->chips)) {
meson_chip = list_first_entry(&nfc->chips,
struct meson_nfc_nand_chip, node);
mtd = nand_to_mtd(&meson_chip->nand);
WARN_ON(mtd_device_unregister(mtd));
nand_cleanup(&meson_chip->nand);
list_del(&meson_chip->node);
}
}
static int meson_nfc_nand_chips_init(struct device *dev,
struct meson_nfc *nfc)
{
struct device_node *np = dev->of_node;
struct device_node *nand_np;
int ret;
for_each_child_of_node(np, nand_np) {
ret = meson_nfc_nand_chip_init(dev, nfc, nand_np);
if (ret) {
meson_nfc_nand_chip_cleanup(nfc);
of_node_put(nand_np);
return ret;
}
}
return 0;
}
static irqreturn_t meson_nfc_irq(int irq, void *id)
{
struct meson_nfc *nfc = id;
u32 cfg;
cfg = readl(nfc->reg_base + NFC_REG_CFG);
if (!(cfg & NFC_RB_IRQ_EN))
return IRQ_NONE;
cfg &= ~(NFC_RB_IRQ_EN);
writel(cfg, nfc->reg_base + NFC_REG_CFG);
complete(&nfc->completion);
return IRQ_HANDLED;
}
static const struct meson_nfc_data meson_gxl_data = {
.ecc_caps = &meson_gxl_ecc_caps,
};
static const struct meson_nfc_data meson_axg_data = {
.ecc_caps = &meson_axg_ecc_caps,
};
static const struct of_device_id meson_nfc_id_table[] = {
{
.compatible = "amlogic,meson-gxl-nfc",
.data = &meson_gxl_data,
}, {
.compatible = "amlogic,meson-axg-nfc",
.data = &meson_axg_data,
},
{}
};
MODULE_DEVICE_TABLE(of, meson_nfc_id_table);
static int meson_nfc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_nfc *nfc;
int ret, irq;
nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
if (!nfc)
return -ENOMEM;
nfc->data = of_device_get_match_data(&pdev->dev);
if (!nfc->data)
return -ENODEV;
nand_controller_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
init_completion(&nfc->completion);
nfc->dev = dev;
nfc->reg_base = devm_platform_ioremap_resource_byname(pdev, "nfc");
if (IS_ERR(nfc->reg_base))
return PTR_ERR(nfc->reg_base);
nfc->reg_clk = devm_platform_ioremap_resource_byname(pdev, "emmc");
if (IS_ERR(nfc->reg_clk))
return PTR_ERR(nfc->reg_clk);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -EINVAL;
ret = meson_nfc_clk_init(nfc);
if (ret) {
dev_err(dev, "failed to initialize NAND clock\n");
return ret;
}
writel(0, nfc->reg_base + NFC_REG_CFG);
ret = devm_request_irq(dev, irq, meson_nfc_irq, 0, dev_name(dev), nfc);
if (ret) {
dev_err(dev, "failed to request NFC IRQ\n");
ret = -EINVAL;
goto err_clk;
}
ret = dma_set_mask(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "failed to set DMA mask\n");
goto err_clk;
}
platform_set_drvdata(pdev, nfc);
ret = meson_nfc_nand_chips_init(dev, nfc);
if (ret) {
dev_err(dev, "failed to init NAND chips\n");
goto err_clk;
}
return 0;
err_clk:
meson_nfc_disable_clk(nfc);
return ret;
}
static void meson_nfc_remove(struct platform_device *pdev)
{
struct meson_nfc *nfc = platform_get_drvdata(pdev);
meson_nfc_nand_chip_cleanup(nfc);
meson_nfc_disable_clk(nfc);
}
static struct platform_driver meson_nfc_driver = {
.probe = meson_nfc_probe,
.remove_new = meson_nfc_remove,
.driver = {
.name = "meson-nand",
.of_match_table = meson_nfc_id_table,
},
};
module_platform_driver(meson_nfc_driver);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_AUTHOR("Liang Yang <[email protected]>");
MODULE_DESCRIPTION("Amlogic's Meson NAND Flash Controller driver");
| linux-master | drivers/mtd/nand/raw/meson_nand.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Free Electrons
* Copyright (C) 2017 NextThing Co
*
* Author: Boris Brezillon <[email protected]>
*/
#include "internals.h"
static void amd_nand_decode_id(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
memorg = nanddev_get_memorg(&chip->base);
nand_decode_ext_id(chip);
/*
* Check for Spansion/AMD ID + repeating 5th, 6th byte since
* some Spansion chips have erasesize that conflicts with size
* listed in nand_ids table.
* Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
*/
if (chip->id.data[4] != 0x00 && chip->id.data[5] == 0x00 &&
chip->id.data[6] == 0x00 && chip->id.data[7] == 0x00 &&
memorg->pagesize == 512) {
memorg->pages_per_eraseblock = 256;
memorg->pages_per_eraseblock <<= ((chip->id.data[3] & 0x03) << 1);
mtd->erasesize = memorg->pages_per_eraseblock *
memorg->pagesize;
}
}
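/*
 * Worked example with a hypothetical ID: id.data[3] & 0x03 == 0x01
 * gives pages_per_eraseblock = 256 << 2 = 1024, so with 512-byte
 * pages the eraseblock is 512 KiB.
 */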
static int amd_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
/*
* According to the datasheet of some Cypress SLC NANDs,
* the bad block markers can be in the first, second or last
* page of a block. So let's check all three locations.
*/
chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE |
NAND_BBM_LASTPAGE;
return 0;
}
const struct nand_manufacturer_ops amd_nand_manuf_ops = {
.detect = amd_nand_decode_id,
.init = amd_nand_init,
};
| linux-master | drivers/mtd/nand/raw/nand_amd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Overview:
* This is the generic MTD driver for NAND flash devices. It should be
* capable of working with almost all NAND chips currently available.
*
* Additional technical information is available on
* http://www.linux-mtd.infradead.org/doc/nand.html
*
* Copyright (C) 2000 Steven J. Hill ([email protected])
* 2002-2006 Thomas Gleixner ([email protected])
*
* Credits:
* David Woodhouse for adding multichip support
*
* Aleph One Ltd. and Toby Churchill Ltd. for supporting the
* rework for 2K page size chips
*
* TODO:
* Enable cached programming for 2k page size chips
* Check, if mtd->ecctype should be set to MTD_ECC_HW
* if we have HW ECC support.
* BBT table is not serialized, has to be fixed
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/nand-ecc-sw-bch.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include "internals.h"
static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
struct mtd_pairing_info *info)
{
int lastpage = (mtd->erasesize / mtd->writesize) - 1;
int dist = 3;
if (page == lastpage)
dist = 2;
if (!page || (page & 1)) {
info->group = 0;
info->pair = (page + 1) / 2;
} else {
info->group = 1;
info->pair = (page + 1 - dist) / 2;
}
return 0;
}
static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
const struct mtd_pairing_info *info)
{
int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
int page = info->pair * 2;
int dist = 3;
if (!info->group && !info->pair)
return 0;
if (info->pair == lastpair && info->group)
dist = 2;
if (!info->group)
page--;
else if (info->pair)
page += dist - 1;
if (page >= mtd->erasesize / mtd->writesize)
return -EINVAL;
return page;
}
const struct mtd_pairing_scheme dist3_pairing_scheme = {
.ngroups = 2,
.get_info = nand_pairing_dist3_get_info,
.get_wunit = nand_pairing_dist3_get_wunit,
};
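/*
 * Example of the distance-3 pairing above (our reading of the two
 * helpers): page 0 -> (group 0, pair 0), page 1 -> (0, 1),
 * page 2 -> (1, 0), page 3 -> (0, 2), page 4 -> (1, 1), ...
 * Pair 0 couples pages 0 and 2; every later pair couples an odd
 * page N with page N + 3 (N + 2 for the last page of the block).
 */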
static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
{
int ret = 0;
/* Start address must align on block boundary */
if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
pr_debug("%s: unaligned address\n", __func__);
ret = -EINVAL;
}
/* Length must align on block boundary */
if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
pr_debug("%s: length not block aligned\n", __func__);
ret = -EINVAL;
}
return ret;
}
/**
* nand_extract_bits - Copy unaligned bits from one buffer to another one
* @dst: destination buffer
* @dst_off: bit offset at which the writing starts
* @src: source buffer
* @src_off: bit offset at which the reading starts
* @nbits: number of bits to copy from @src to @dst
*
* Copy bits from one memory region to another (overlap authorized).
*/
void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
unsigned int src_off, unsigned int nbits)
{
unsigned int tmp, n;
dst += dst_off / 8;
dst_off %= 8;
src += src_off / 8;
src_off %= 8;
while (nbits) {
n = min3(8 - dst_off, 8 - src_off, nbits);
tmp = (*src >> src_off) & GENMASK(n - 1, 0);
*dst &= ~GENMASK(n - 1 + dst_off, dst_off);
*dst |= tmp << dst_off;
dst_off += n;
if (dst_off >= 8) {
dst++;
dst_off -= 8;
}
src_off += n;
if (src_off >= 8) {
src++;
src_off -= 8;
}
nbits -= n;
}
}
EXPORT_SYMBOL_GPL(nand_extract_bits);
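/*
 * Usage sketch: with src = { 0xf0 }, nand_extract_bits(dst, 0, src,
 * 4, 4) copies the high nibble of src into the low nibble of dst,
 * leaving the other bits of *dst untouched.
 */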
/**
* nand_select_target() - Select a NAND target (A.K.A. die)
* @chip: NAND chip object
* @cs: the CS line to select. Note that this CS id is always from the chip
* PoV, not the controller one
*
* Select a NAND target so that further operations executed on @chip go to the
* selected NAND target.
*/
void nand_select_target(struct nand_chip *chip, unsigned int cs)
{
/*
* cs should always lie between 0 and nanddev_ntargets(), when that's
* not the case it's a bug and the caller should be fixed.
*/
if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
return;
chip->cur_cs = cs;
if (chip->legacy.select_chip)
chip->legacy.select_chip(chip, cs);
}
EXPORT_SYMBOL_GPL(nand_select_target);
/**
* nand_deselect_target() - Deselect the currently selected target
* @chip: NAND chip object
*
* Deselect the currently selected NAND target. The result of operations
* executed on @chip after the target has been deselected is undefined.
*/
void nand_deselect_target(struct nand_chip *chip)
{
if (chip->legacy.select_chip)
chip->legacy.select_chip(chip, -1);
chip->cur_cs = -1;
}
EXPORT_SYMBOL_GPL(nand_deselect_target);
/**
* nand_release_device - [GENERIC] release chip
* @chip: NAND chip object
*
* Release chip lock and wake up anyone waiting on the device.
*/
static void nand_release_device(struct nand_chip *chip)
{
/* Release the controller and the chip */
mutex_unlock(&chip->controller->lock);
mutex_unlock(&chip->lock);
}
/**
* nand_bbm_get_next_page - Get the next page for bad block markers
* @chip: NAND chip object
* @page: First page to start checking for bad block marker usage
*
* Returns an integer that corresponds to the page offset within a block, for
* a page that is used to store bad block markers. If no more pages are
* available, -EINVAL is returned.
*/
int nand_bbm_get_next_page(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int last_page = ((mtd->erasesize - mtd->writesize) >>
chip->page_shift) & chip->pagemask;
unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
| NAND_BBM_LASTPAGE;
if (page == 0 && !(chip->options & bbm_flags))
return 0;
if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
return 0;
if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
return 1;
if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
return last_page;
return -EINVAL;
}
/**
* nand_block_bad - [DEFAULT] Read bad block marker from the chip
* @chip: NAND chip object
* @ofs: offset from device start
*
* Check, if the block is bad.
*/
static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
int first_page, page_offset;
int res;
u8 bad;
first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
page_offset = nand_bbm_get_next_page(chip, 0);
while (page_offset >= 0) {
res = chip->ecc.read_oob(chip, first_page + page_offset);
if (res < 0)
return res;
bad = chip->oob_poi[chip->badblockpos];
if (likely(chip->badblockbits == 8))
res = bad != 0xFF;
else
res = hweight8(bad) < chip->badblockbits;
if (res)
return res;
page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
}
return 0;
}
/**
* nand_region_is_secured() - Check if the region is secured
* @chip: NAND chip object
* @offset: Offset of the region to check
* @size: Size of the region to check
*
* Checks if the region is secured by comparing the offset and size with the
* list of secure regions obtained from DT. Returns true if the region is
* secured else false.
*/
static bool nand_region_is_secured(struct nand_chip *chip, loff_t offset, u64 size)
{
int i;
/* Skip touching the secure regions if present */
for (i = 0; i < chip->nr_secure_regions; i++) {
const struct nand_secure_region *region = &chip->secure_regions[i];
if (offset + size <= region->offset ||
offset >= region->offset + region->size)
continue;
pr_debug("%s: Region 0x%llx - 0x%llx is secured!",
__func__, offset, offset + size);
return true;
}
return false;
}
static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (chip->options & NAND_NO_BBM_QUIRK)
return 0;
/* Check if the region is secured */
if (nand_region_is_secured(chip, ofs, mtd->erasesize))
return -EIO;
if (mtd_check_expert_analysis_mode())
return 0;
if (chip->legacy.block_bad)
return chip->legacy.block_bad(chip, ofs);
return nand_block_bad(chip, ofs);
}
/**
* nand_get_device - [GENERIC] Get chip for selected access
* @chip: NAND chip structure
*
* Lock the device and its controller for exclusive access
*/
static void nand_get_device(struct nand_chip *chip)
{
/* Wait until the device is resumed. */
while (1) {
mutex_lock(&chip->lock);
if (!chip->suspended) {
mutex_lock(&chip->controller->lock);
return;
}
mutex_unlock(&chip->lock);
wait_event(chip->resume_wq, !chip->suspended);
}
}
/**
* nand_check_wp - [GENERIC] check if the chip is write protected
* @chip: NAND chip object
*
* Check, if the device is write protected. The function expects, that the
* device is already selected.
*/
static int nand_check_wp(struct nand_chip *chip)
{
u8 status;
int ret;
/* Broken xD cards report WP despite being writable */
if (chip->options & NAND_BROKEN_XD)
return 0;
/* Check the WP bit */
ret = nand_status_op(chip, &status);
if (ret)
return ret;
return status & NAND_STATUS_WP ? 0 : 1;
}
/**
* nand_fill_oob - [INTERN] Transfer client buffer to oob
* @chip: NAND chip object
* @oob: oob data buffer
* @len: oob data write length
* @ops: oob ops structure
*/
static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
struct mtd_oob_ops *ops)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
/*
* Initialise to all 0xFF, to avoid the possibility of left over OOB
* data from a previous OOB read.
*/
memset(chip->oob_poi, 0xff, mtd->oobsize);
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
case MTD_OPS_RAW:
memcpy(chip->oob_poi + ops->ooboffs, oob, len);
return oob + len;
case MTD_OPS_AUTO_OOB:
ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
ops->ooboffs, len);
BUG_ON(ret);
return oob + len;
default:
BUG();
}
return NULL;
}
/**
* nand_do_write_oob - [MTD Interface] NAND write out-of-band
* @chip: NAND chip object
* @to: offset to write to
* @ops: oob operation description structure
*
* NAND write out-of-band.
*/
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
struct mtd_oob_ops *ops)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int chipnr, page, status, len, ret;
pr_debug("%s: to = 0x%08x, len = %i\n",
__func__, (unsigned int)to, (int)ops->ooblen);
len = mtd_oobavail(mtd, ops);
/* Do not allow write past end of page */
if ((ops->ooboffs + ops->ooblen) > len) {
pr_debug("%s: attempt to write past end of page\n",
__func__);
return -EINVAL;
}
/* Check if the region is secured */
if (nand_region_is_secured(chip, to, ops->ooblen))
return -EIO;
chipnr = (int)(to >> chip->chip_shift);
/*
* Reset the chip. Some chips (like the Toshiba TC5832DC found in one
* of my DiskOnChip 2000 test units) will clear the whole data page too
* if we don't do this. I have no clue why, but I seem to have 'fixed'
* it in the doc2000 driver in August 1999. dwmw2.
*/
ret = nand_reset(chip, chipnr);
if (ret)
return ret;
nand_select_target(chip, chipnr);
/* Shift to get page */
page = (int)(to >> chip->page_shift);
/* Check, if it is write protected */
if (nand_check_wp(chip)) {
nand_deselect_target(chip);
return -EROFS;
}
/* Invalidate the page cache, if we write to the cached page */
if (page == chip->pagecache.page)
chip->pagecache.page = -1;
nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
if (ops->mode == MTD_OPS_RAW)
status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
else
status = chip->ecc.write_oob(chip, page & chip->pagemask);
nand_deselect_target(chip);
if (status)
return status;
ops->oobretlen = ops->ooblen;
return 0;
}
/**
* nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
* @chip: NAND chip object
* @ofs: offset from device start
*
* This is the default implementation, which can be overridden by a hardware
* specific driver. It provides the details for writing a bad block marker to a
* block.
*/
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mtd_oob_ops ops;
uint8_t buf[2] = { 0, 0 };
int ret = 0, res, page_offset;
memset(&ops, 0, sizeof(ops));
ops.oobbuf = buf;
ops.ooboffs = chip->badblockpos;
if (chip->options & NAND_BUSWIDTH_16) {
ops.ooboffs &= ~0x01;
ops.len = ops.ooblen = 2;
} else {
ops.len = ops.ooblen = 1;
}
ops.mode = MTD_OPS_PLACE_OOB;
page_offset = nand_bbm_get_next_page(chip, 0);
while (page_offset >= 0) {
res = nand_do_write_oob(chip,
ofs + (page_offset * mtd->writesize),
&ops);
if (!ret)
ret = res;
page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
}
return ret;
}
/**
* nand_markbad_bbm - mark a block by updating the BBM
* @chip: NAND chip object
* @ofs: offset of the block to mark bad
*/
int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
{
if (chip->legacy.block_markbad)
return chip->legacy.block_markbad(chip, ofs);
return nand_default_block_markbad(chip, ofs);
}
/**
* nand_block_markbad_lowlevel - mark a block bad
* @chip: NAND chip object
* @ofs: offset from device start
*
* This function performs the generic NAND bad block marking steps (i.e., bad
* block table(s) and/or marker(s)). We only allow the hardware driver to
* specify how to write bad block markers to OOB (chip->legacy.block_markbad).
*
* We try operations in the following order:
*
* (1) erase the affected block, to allow OOB marker to be written cleanly
* (2) write bad block marker to OOB area of affected block (unless flag
* NAND_BBT_NO_OOB_BBM is present)
* (3) update the BBT
*
* Note that we retain the first error encountered in (2) or (3), finish the
* procedures, and dump the error in the end.
*/
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int res, ret = 0;
if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
struct erase_info einfo;
/* Attempt erase before marking OOB */
memset(&einfo, 0, sizeof(einfo));
einfo.addr = ofs;
einfo.len = 1ULL << chip->phys_erase_shift;
nand_erase_nand(chip, &einfo, 0);
/* Write bad block marker to OOB */
nand_get_device(chip);
ret = nand_markbad_bbm(chip, ofs);
nand_release_device(chip);
}
/* Mark block bad in BBT */
if (chip->bbt) {
res = nand_markbad_bbt(chip, ofs);
if (!ret)
ret = res;
}
if (!ret)
mtd->ecc_stats.badblocks++;
return ret;
}
/**
* nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
* @mtd: MTD device structure
* @ofs: offset from device start
*
* Check if the block is marked as reserved.
*/
static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (!chip->bbt)
return 0;
/* Return info from the table */
return nand_isreserved_bbt(chip, ofs);
}
/**
* nand_block_checkbad - [GENERIC] Check if a block is marked bad
* @chip: NAND chip object
* @ofs: offset from device start
* @allowbbt: 1 if it is allowed to access the bbt area
*
* Check, if the block is bad. Either by reading the bad block table or
* calling of the scan function.
*/
static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
{
/* Return info from the table */
if (chip->bbt)
return nand_isbad_bbt(chip, ofs, allowbbt);
return nand_isbad_bbm(chip, ofs);
}
/**
* nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
* @chip: NAND chip structure
* @timeout_ms: Timeout in ms
*
* Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
* If that does not happen within the specified timeout, -ETIMEDOUT is
* returned.
*
* This helper is intended to be used when the controller does not have access
* to the NAND R/B pin.
*
* Be aware that calling this helper from an ->exec_op() implementation means
* ->exec_op() must be re-entrant.
*
* Return 0 if the NAND chip is ready, a negative error otherwise.
*/
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
{
const struct nand_interface_config *conf;
u8 status = 0;
int ret;
if (!nand_has_exec_op(chip))
return -ENOTSUPP;
/* Wait tWB before polling the STATUS reg. */
conf = nand_get_interface_config(chip);
ndelay(NAND_COMMON_TIMING_NS(conf, tWB_max));
ret = nand_status_op(chip, NULL);
if (ret)
return ret;
/*
* +1 below is necessary because if we are now in the last fraction
* of a jiffy and msecs_to_jiffies is 1 then we will wait only that
* small jiffy fraction - possibly leading to a false timeout
*/
timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
do {
ret = nand_read_data_op(chip, &status, sizeof(status), true,
false);
if (ret)
break;
if (status & NAND_STATUS_READY)
break;
/*
* Typical lowest execution time for a tR on most NANDs is 10us,
* use this as polling delay before doing something smarter (i.e.
* deriving a delay from the timeout value, timeout_ms/ratio).
*/
udelay(10);
} while (time_before(jiffies, timeout_ms));
/*
* We have to exit READ_STATUS mode in order to read real data on the
* bus in case the WAITRDY instruction is preceding a DATA_IN
* instruction.
*/
nand_exit_status_op(chip);
if (ret)
return ret;
return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
};
EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
/**
* nand_gpio_waitrdy - Poll R/B GPIO pin until ready
* @chip: NAND chip structure
* @gpiod: GPIO descriptor of R/B pin
* @timeout_ms: Timeout in ms
*
* Poll the R/B GPIO pin until it becomes ready. If that does not happen
* within the specified timeout, -ETIMEDOUT is returned.
*
* This helper is intended to be used when the controller has access to the
* NAND R/B pin over GPIO.
*
* Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
*/
int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
unsigned long timeout_ms)
{
/*
* Wait until R/B pin indicates chip is ready or timeout occurs.
* +1 below is necessary because if we are now in the last fraction
* of a jiffy and msecs_to_jiffies is 1 then we will wait only that
* small jiffy fraction - possibly leading to a false timeout.
*/
timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
do {
if (gpiod_get_value_cansleep(gpiod))
return 0;
cond_resched();
} while (time_before(jiffies, timeout_ms));
return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
};
EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
/**
* panic_nand_wait - [GENERIC] wait until the command is done
* @chip: NAND chip structure
* @timeo: timeout
*
* Wait for command done. This is a helper function for nand_wait used when
* we are in interrupt context. May happen when in panic and trying to write
* an oops through mtdoops.
*/
void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
{
int i;
for (i = 0; i < timeo; i++) {
if (chip->legacy.dev_ready) {
if (chip->legacy.dev_ready(chip))
break;
} else {
int ret;
u8 status;
ret = nand_read_data_op(chip, &status, sizeof(status),
true, false);
if (ret)
return;
if (status & NAND_STATUS_READY)
break;
}
mdelay(1);
}
}
static bool nand_supports_get_features(struct nand_chip *chip, int addr)
{
return (chip->parameters.supports_set_get_features &&
test_bit(addr, chip->parameters.get_feature_list));
}
static bool nand_supports_set_features(struct nand_chip *chip, int addr)
{
return (chip->parameters.supports_set_get_features &&
test_bit(addr, chip->parameters.set_feature_list));
}
/**
* nand_reset_interface - Reset data interface and timings
* @chip: The NAND chip
* @chipnr: Internal die id
*
* Reset the Data interface and timings to ONFI mode 0.
*
* Returns 0 for success or negative error code otherwise.
*/
static int nand_reset_interface(struct nand_chip *chip, int chipnr)
{
const struct nand_controller_ops *ops = chip->controller->ops;
int ret;
if (!nand_controller_can_setup_interface(chip))
return 0;
/*
* The ONFI specification says:
* "
* To transition from NV-DDR or NV-DDR2 to the SDR data
* interface, the host shall use the Reset (FFh) command
* using SDR timing mode 0. A device in any timing mode is
* required to recognize Reset (FFh) command issued in SDR
* timing mode 0.
* "
*
* Configure the data interface in SDR mode and set the
* timings to timing mode 0.
*/
chip->current_interface_config = nand_get_reset_interface_config();
ret = ops->setup_interface(chip, chipnr,
chip->current_interface_config);
if (ret)
pr_err("Failed to configure data interface to SDR timing mode 0\n");
return ret;
}
/**
* nand_setup_interface - Setup the best data interface and timings
* @chip: The NAND chip
* @chipnr: Internal die id
*
* Configure what has been reported to be the best data interface and NAND
* timings supported by the chip and the driver.
*
* Returns 0 for success or negative error code otherwise.
*/
static int nand_setup_interface(struct nand_chip *chip, int chipnr)
{
const struct nand_controller_ops *ops = chip->controller->ops;
u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }, request;
int ret;
if (!nand_controller_can_setup_interface(chip))
return 0;
/*
* A nand_reset_interface() put both the NAND chip and the NAND
* controller in timings mode 0. If the default mode for this chip is
* also 0, no need to proceed to the change again. Plus, at probe time,
* nand_setup_interface() uses ->set/get_features() which would
* fail anyway as the parameter page is not available yet.
*/
if (!chip->best_interface_config)
return 0;
request = chip->best_interface_config->timings.mode;
if (nand_interface_is_sdr(chip->best_interface_config))
request |= ONFI_DATA_INTERFACE_SDR;
else
request |= ONFI_DATA_INTERFACE_NVDDR;
tmode_param[0] = request;
/* Change the mode on the chip side (if supported by the NAND chip) */
if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
nand_select_target(chip, chipnr);
ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
tmode_param);
nand_deselect_target(chip);
if (ret)
return ret;
}
/* Change the mode on the controller side */
ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
if (ret)
return ret;
/* Check the mode has been accepted by the chip, if supported */
if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
goto update_interface_config;
memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
nand_select_target(chip, chipnr);
ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
tmode_param);
nand_deselect_target(chip);
if (ret)
goto err_reset_chip;
if (request != tmode_param[0]) {
pr_warn("%s timing mode %d not acknowledged by the NAND chip\n",
nand_interface_is_nvddr(chip->best_interface_config) ? "NV-DDR" : "SDR",
chip->best_interface_config->timings.mode);
pr_debug("NAND chip would work in %s timing mode %d\n",
tmode_param[0] & ONFI_DATA_INTERFACE_NVDDR ? "NV-DDR" : "SDR",
(unsigned int)ONFI_TIMING_MODE_PARAM(tmode_param[0]));
goto err_reset_chip;
}
update_interface_config:
chip->current_interface_config = chip->best_interface_config;
return 0;
err_reset_chip:
/*
* Fallback to mode 0 if the chip explicitly did not ack the chosen
* timing mode.
*/
nand_reset_interface(chip, chipnr);
nand_select_target(chip, chipnr);
nand_reset_op(chip);
nand_deselect_target(chip);
return ret;
}
/**
* nand_choose_best_sdr_timings - Pick up the best SDR timings that both the
* NAND controller and the NAND chip support
* @chip: the NAND chip
* @iface: the interface configuration (can eventually be updated)
* @spec_timings: specific timings, when not fitting the ONFI specification
*
* If specific timings are provided, use them. Otherwise, retrieve supported
* timing modes from ONFI information.
*/
int nand_choose_best_sdr_timings(struct nand_chip *chip,
struct nand_interface_config *iface,
struct nand_sdr_timings *spec_timings)
{
const struct nand_controller_ops *ops = chip->controller->ops;
int best_mode = 0, mode, ret = -EOPNOTSUPP;
iface->type = NAND_SDR_IFACE;
if (spec_timings) {
iface->timings.sdr = *spec_timings;
iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);
/* Verify the controller supports the requested interface */
ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
iface);
if (!ret) {
chip->best_interface_config = iface;
return ret;
}
/* Fallback to slower modes */
best_mode = iface->timings.mode;
} else if (chip->parameters.onfi) {
best_mode = fls(chip->parameters.onfi->sdr_timing_modes) - 1;
}
for (mode = best_mode; mode >= 0; mode--) {
onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);
ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
iface);
if (!ret) {
chip->best_interface_config = iface;
break;
}
}
return ret;
}
/**
* nand_choose_best_nvddr_timings - Pick up the best NVDDR timings that both the
* NAND controller and the NAND chip support
* @chip: the NAND chip
* @iface: the interface configuration (can eventually be updated)
* @spec_timings: specific timings, when not fitting the ONFI specification
*
* If specific timings are provided, use them. Otherwise, retrieve supported
* timing modes from ONFI information.
*/
int nand_choose_best_nvddr_timings(struct nand_chip *chip,
struct nand_interface_config *iface,
struct nand_nvddr_timings *spec_timings)
{
const struct nand_controller_ops *ops = chip->controller->ops;
int best_mode = 0, mode, ret = -EOPNOTSUPP;
iface->type = NAND_NVDDR_IFACE;
if (spec_timings) {
iface->timings.nvddr = *spec_timings;
iface->timings.mode = onfi_find_closest_nvddr_mode(spec_timings);
/* Verify the controller supports the requested interface */
ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
iface);
if (!ret) {
chip->best_interface_config = iface;
return ret;
}
		/* Fall back to slower modes */
best_mode = iface->timings.mode;
} else if (chip->parameters.onfi) {
best_mode = fls(chip->parameters.onfi->nvddr_timing_modes) - 1;
}
for (mode = best_mode; mode >= 0; mode--) {
onfi_fill_interface_config(chip, iface, NAND_NVDDR_IFACE, mode);
ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
iface);
if (!ret) {
chip->best_interface_config = iface;
break;
}
}
return ret;
}
/**
 * nand_choose_best_timings - Pick up the best NVDDR or SDR timings that both
 *                            the NAND controller and the NAND chip support
* @chip: the NAND chip
 * @iface: the interface configuration (may be updated)
*
* If specific timings are provided, use them. Otherwise, retrieve supported
* timing modes from ONFI information.
*/
static int nand_choose_best_timings(struct nand_chip *chip,
struct nand_interface_config *iface)
{
int ret;
/* Try the fastest timings: NV-DDR */
ret = nand_choose_best_nvddr_timings(chip, iface, NULL);
if (!ret)
return 0;
	/* Fall back to SDR timings otherwise */
return nand_choose_best_sdr_timings(chip, iface, NULL);
}
/**
* nand_choose_interface_config - find the best data interface and timings
* @chip: The NAND chip
*
* Find the best data interface and NAND timings supported by the chip
 * and the driver. If available, let the NAND manufacturer driver propose its own
* set of timings.
*
* After this function nand_chip->interface_config is initialized with the best
* timing mode available.
*
* Returns 0 for success or negative error code otherwise.
*/
static int nand_choose_interface_config(struct nand_chip *chip)
{
struct nand_interface_config *iface;
int ret;
if (!nand_controller_can_setup_interface(chip))
return 0;
iface = kzalloc(sizeof(*iface), GFP_KERNEL);
if (!iface)
return -ENOMEM;
if (chip->ops.choose_interface_config)
ret = chip->ops.choose_interface_config(chip, iface);
else
ret = nand_choose_best_timings(chip, iface);
if (ret)
kfree(iface);
return ret;
}
/**
* nand_fill_column_cycles - fill the column cycles of an address
* @chip: The NAND chip
* @addrs: Array of address cycles to fill
* @offset_in_page: The offset in the page
*
* Fills the first or the first two bytes of the @addrs field depending
* on the NAND bus width and the page size.
*
* Returns the number of cycles needed to encode the column, or a negative
* error code in case one of the arguments is invalid.
*/
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
unsigned int offset_in_page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
	/*
	 * Make sure the offset does not exceed the full page size
	 * (main area + OOB).
	 */
if (offset_in_page > mtd->writesize + mtd->oobsize)
return -EINVAL;
/*
* On small page NANDs, there's a dedicated command to access the OOB
* area, and the column address is relative to the start of the OOB
	 * area, not the start of the page. Adjust the address accordingly.
*/
if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
offset_in_page -= mtd->writesize;
/*
	 * The offset in page is expressed in bytes; if the NAND bus is 16-bit
	 * wide, it must be divided by 2.
*/
if (chip->options & NAND_BUSWIDTH_16) {
if (WARN_ON(offset_in_page % 2))
return -EINVAL;
offset_in_page /= 2;
}
addrs[0] = offset_in_page;
/*
* Small page NANDs use 1 cycle for the columns, while large page NANDs
* need 2
*/
if (mtd->writesize <= 512)
return 1;
addrs[1] = offset_in_page >> 8;
return 2;
}
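/*
 * Worked example (illustrative): on a 2048-byte page with an 8-bit bus,
 * offset_in_page = 0x1A3 yields addrs[0] = 0xA3 and addrs[1] = 0x01, and the
 * function returns 2. On a 16-bit bus the offset must be even and is halved
 * first: offset_in_page = 0x1A2 becomes column 0xD1, so addrs[0] = 0xD1 and
 * addrs[1] = 0x00. On a 512-byte page only addrs[0] is filled and the
 * function returns 1.
 */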
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf,
unsigned int len)
{
const struct nand_interface_config *conf =
nand_get_interface_config(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
u8 addrs[4];
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_READ0, 0),
NAND_OP_ADDR(3, addrs, NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
NAND_COMMON_TIMING_NS(conf, tRR_min)),
NAND_OP_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int ret;
/* Drop the DATA_IN instruction if len is set to 0. */
if (!len)
op.ninstrs--;
if (offset_in_page >= mtd->writesize)
instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
else if (offset_in_page >= 256 &&
!(chip->options & NAND_BUSWIDTH_16))
instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
if (ret < 0)
return ret;
addrs[1] = page;
addrs[2] = page >> 8;
if (chip->options & NAND_ROW_ADDR_3) {
addrs[3] = page >> 16;
instrs[1].ctx.addr.naddrs++;
}
return nand_exec_op(chip, &op);
}
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf,
unsigned int len)
{
const struct nand_interface_config *conf =
nand_get_interface_config(chip);
u8 addrs[5];
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_READ0, 0),
NAND_OP_ADDR(4, addrs, 0),
NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
NAND_COMMON_TIMING_NS(conf, tRR_min)),
NAND_OP_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int ret;
/* Drop the DATA_IN instruction if len is set to 0. */
if (!len)
op.ninstrs--;
ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
if (ret < 0)
return ret;
addrs[2] = page;
addrs[3] = page >> 8;
if (chip->options & NAND_ROW_ADDR_3) {
addrs[4] = page >> 16;
instrs[1].ctx.addr.naddrs++;
}
return nand_exec_op(chip, &op);
}
static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf,
unsigned int len, bool check_only)
{
const struct nand_interface_config *conf =
nand_get_interface_config(chip);
u8 addrs[5];
struct nand_op_instr start_instrs[] = {
NAND_OP_CMD(NAND_CMD_READ0, 0),
NAND_OP_ADDR(4, addrs, 0),
NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 0),
NAND_OP_CMD(NAND_CMD_READCACHESEQ, NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
NAND_COMMON_TIMING_NS(conf, tRR_min)),
NAND_OP_DATA_IN(len, buf, 0),
};
struct nand_op_instr cont_instrs[] = {
NAND_OP_CMD(page == chip->cont_read.last_page ?
NAND_CMD_READCACHEEND : NAND_CMD_READCACHESEQ,
NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
NAND_COMMON_TIMING_NS(conf, tRR_min)),
NAND_OP_DATA_IN(len, buf, 0),
};
struct nand_operation start_op = NAND_OPERATION(chip->cur_cs, start_instrs);
struct nand_operation cont_op = NAND_OPERATION(chip->cur_cs, cont_instrs);
int ret;
if (!len) {
start_op.ninstrs--;
cont_op.ninstrs--;
}
ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
if (ret < 0)
return ret;
addrs[2] = page;
addrs[3] = page >> 8;
if (chip->options & NAND_ROW_ADDR_3) {
addrs[4] = page >> 16;
start_instrs[1].ctx.addr.naddrs++;
}
/* Check if cache reads are supported */
if (check_only) {
if (nand_check_op(chip, &start_op) || nand_check_op(chip, &cont_op))
return -EOPNOTSUPP;
return 0;
}
if (page == chip->cont_read.first_page)
return nand_exec_op(chip, &start_op);
else
return nand_exec_op(chip, &cont_op);
}
static bool rawnand_cont_read_ongoing(struct nand_chip *chip, unsigned int page)
{
return chip->cont_read.ongoing &&
page >= chip->cont_read.first_page &&
page <= chip->cont_read.last_page;
}
/**
* nand_read_page_op - Do a READ PAGE operation
* @chip: The NAND chip
* @page: page to read
* @offset_in_page: offset within the page
* @buf: buffer used to store the data
* @len: length of the buffer
*
* This function issues a READ PAGE operation.
* This function does not select/unselect the CS line.
*
* Returns 0 on success, a negative error code otherwise.
*/
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf, unsigned int len)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (len && !buf)
return -EINVAL;
if (offset_in_page + len > mtd->writesize + mtd->oobsize)
return -EINVAL;
if (nand_has_exec_op(chip)) {
if (mtd->writesize > 512) {
if (rawnand_cont_read_ongoing(chip, page))
return nand_lp_exec_cont_read_page_op(chip, page,
offset_in_page,
buf, len, false);
else
return nand_lp_exec_read_page_op(chip, page,
offset_in_page, buf,
len);
}
return nand_sp_exec_read_page_op(chip, page, offset_in_page,
buf, len);
}
chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
if (len)
chip->legacy.read_buf(chip, buf, len);
return 0;
}
EXPORT_SYMBOL_GPL(nand_read_page_op);
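/*
 * Illustrative sketch (not part of this file): reading the whole main area
 * of a page with nand_read_page_op(). The caller owns CS selection, as noted
 * above; die 0 and the function name are assumptions of this example.
 */
static int example_read_full_page(struct nand_chip *chip, unsigned int page,
				  void *buf)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	nand_select_target(chip, 0);
	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	nand_deselect_target(chip);

	return ret;
}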
/**
* nand_read_param_page_op - Do a READ PARAMETER PAGE operation
* @chip: The NAND chip
* @page: parameter page to read
* @buf: buffer used to store the data
* @len: length of the buffer
*
* This function issues a READ PARAMETER PAGE operation.
* This function does not select/unselect the CS line.
*
* Returns 0 on success, a negative error code otherwise.
*/
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
unsigned int len)
{
unsigned int i;
u8 *p = buf;
if (len && !buf)
return -EINVAL;
if (nand_has_exec_op(chip)) {
const struct nand_interface_config *conf =
nand_get_interface_config(chip);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_PARAM, 0),
NAND_OP_ADDR(1, &page,
NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
NAND_COMMON_TIMING_NS(conf, tRR_min)),
NAND_OP_8BIT_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
/* Drop the DATA_IN instruction if len is set to 0. */
if (!len)
op.ninstrs--;
return nand_exec_op(chip, &op);
}
chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
for (i = 0; i < len; i++)
p[i] = chip->legacy.read_byte(chip);
return 0;
}
/**
* nand_change_read_column_op - Do a CHANGE READ COLUMN operation
* @chip: The NAND chip
* @offset_in_page: offset within the page
* @buf: buffer used to store the data
* @len: length of the buffer
* @force_8bit: force 8-bit bus access
*
* This function issues a CHANGE READ COLUMN operation.
* This function does not select/unselect the CS line.
*
* Returns 0 on success, a negative error code otherwise.
*/
int nand_change_read_column_op(struct nand_chip *chip,
unsigned int offset_in_page, void *buf,
unsigned int len, bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (len && !buf)
return -EINVAL;
if (offset_in_page + len > mtd->writesize + mtd->oobsize)
return -EINVAL;
/* Small page NANDs do not support column change. */
if (mtd->writesize <= 512)
return -ENOTSUPP;
if (nand_has_exec_op(chip)) {
const struct nand_interface_config *conf =
nand_get_interface_config(chip);
u8 addrs[2] = {};
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
NAND_OP_ADDR(2, addrs, 0),
NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
NAND_COMMON_TIMING_NS(conf, tCCS_min)),
NAND_OP_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int ret;
ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
if (ret < 0)
return ret;
/* Drop the DATA_IN instruction if len is set to 0. */
if (!len)
op.ninstrs--;
instrs[3].ctx.data.force_8bit = force_8bit;
return nand_exec_op(chip, &op);
}
chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
if (len)
chip->legacy.read_buf(chip, buf, len);
return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);
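/*
 * Illustrative sketch (not part of this file): load a page into the NAND
 * cache without transferring any data, then fetch only the OOB bytes with a
 * CHANGE READ COLUMN. Large page devices only, as noted above. Die 0 and
 * the function name are assumptions of this example.
 */
static int example_read_oob_only(struct nand_chip *chip, unsigned int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	nand_select_target(chip, 0);
	/* Load the page, transfer no data (len = 0). */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (!ret)
		/* Move the read column to the OOB area and read it out. */
		ret = nand_change_read_column_op(chip, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 false);
	nand_deselect_target(chip);

	return ret;
}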
/**
* nand_read_oob_op - Do a READ OOB operation
* @chip: The NAND chip
* @page: page to read
* @offset_in_oob: offset within the OOB area
* @buf: buffer used to store the data
* @len: length of the buffer
*
* This function issues a READ OOB operation.
* This function does not select/unselect the CS line.
*
* Returns 0 on success, a negative error code otherwise.
*/
int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_oob, void *buf, unsigned int len)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (len && !buf)
return -EINVAL;
if (offset_in_oob + len > mtd->oobsize)
return -EINVAL;
if (nand_has_exec_op(chip))
return nand_read_page_op(chip, page,
mtd->writesize + offset_in_oob,
buf, len);
chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
if (len)
chip->legacy.read_buf(chip, buf, len);
return 0;
}
EXPORT_SYMBOL_GPL(nand_read_oob_op);
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, const void *buf,
unsigned int len, bool prog)
{
const struct nand_interface_config *conf =
nand_get_interface_config(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
u8 addrs[5] = {};
struct nand_op_instr instrs[] = {
/*
* The first instruction will be dropped if we're dealing
* with a large page NAND and adjusted if we're dealing
* with a small page NAND and the page offset is > 255.
*/
NAND_OP_CMD(NAND_CMD_READ0, 0),
NAND_OP_CMD(NAND_CMD_SEQIN, 0),
NAND_OP_ADDR(0, addrs, NAND_COMMON_TIMING_NS(conf, tADL_min)),
NAND_OP_DATA_OUT(len, buf, 0),
NAND_OP_CMD(NAND_CMD_PAGEPROG,
NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
if (naddrs < 0)
return naddrs;
addrs[naddrs++] = page;
addrs[naddrs++] = page >> 8;
if (chip->options & NAND_ROW_ADDR_3)
addrs[naddrs++] = page >> 16;
instrs[2].ctx.addr.naddrs = naddrs;
/* Drop the last two instructions if we're not programming the page. */
if (!prog) {
op.ninstrs -= 2;
/* Also drop the DATA_OUT instruction if empty. */
if (!len)
op.ninstrs--;
}
if (mtd->writesize <= 512) {
/*
* Small pages need some more tweaking: we have to adjust the
* first instruction depending on the page offset we're trying
* to access.
*/
if (offset_in_page >= mtd->writesize)
instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
else if (offset_in_page >= 256 &&
!(chip->options & NAND_BUSWIDTH_16))
instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
} else {
/*
* Drop the first command if we're dealing with a large page
* NAND.
*/
op.instrs++;
op.ninstrs--;
}
return nand_exec_op(chip, &op);
}
/**
* nand_prog_page_begin_op - starts a PROG PAGE operation
* @chip: The NAND chip
* @page: page to write
* @offset_in_page: offset within the page
* @buf: buffer containing the data to write to the page
* @len: length of the buffer
*
* This function issues the first half of a PROG PAGE operation.
* This function does not select/unselect the CS line.
*
* Returns 0 on success, a negative error code otherwise.
*/
int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, const void *buf,
unsigned int len)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (len && !buf)
return -EINVAL;
if (offset_in_page + len > mtd->writesize + mtd->oobsize)
return -EINVAL;
if (nand_has_exec_op(chip))
return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
len, false);
chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
if (buf)
chip->legacy.write_buf(chip, buf, len);
return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
/**
* nand_prog_page_end_op - ends a PROG PAGE operation
* @chip: The NAND chip
*
* This function issues the second half of a PROG PAGE operation.
* This function does not select/unselect the CS line.
*
* Returns 0 on success, a negative error code otherwise.
*/
int nand_prog_page_end_op(struct nand_chip *chip)
{
int ret;
u8 status;
if (nand_has_exec_op(chip)) {
const struct nand_interface_config *conf =
nand_get_interface_config(chip);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_PAGEPROG,
NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max),
0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
ret = nand_exec_op(chip, &op);
if (ret)
return ret;
ret = nand_status_op(chip, &status);
if (ret)
return ret;
} else {
chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
ret = chip->legacy.waitfunc(chip);
if (ret < 0)
return ret;
status = ret;
}
if (status & NAND_STATUS_FAIL)
return -EIO;
return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
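/*
 * Illustrative sketch (not part of this file): the two-step PROG PAGE
 * helpers let a caller interleave extra transfers between the SEQIN and
 * PAGEPROG halves, e.g. to append OOB bytes after the main data. Die 0 and
 * the function name are assumptions of this example.
 */
static int example_prog_page_with_oob(struct nand_chip *chip,
				      unsigned int page, const void *buf)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	nand_select_target(chip, 0);
	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	if (!ret)
		/* Append the OOB bytes before actually programming. */
		ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
					 false);
	if (!ret)
		ret = nand_prog_page_end_op(chip);
	nand_deselect_target(chip);

	return ret;
}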
/**
* nand_prog_page_op - Do a full PROG PAGE operation
* @chip: The NAND chip
* @page: page to write
* @offset_in_page: offset within the page
* @buf: buffer containing the data to write to the page
* @len: length of the buffer
*
* This function issues a full PROG PAGE operation.
* This function does not select/unselect the CS line.
*
* Returns 0 on success, a negative error code otherwise.
*/
int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, const void *buf,
unsigned int len)
{
struct mtd_info *mtd = nand_to_mtd(chip);
u8 status;
int ret;
if (!len || !buf)
return -EINVAL;
if (offset_in_page + len > mtd->writesize + mtd->oobsize)
return -EINVAL;
if (nand_has_exec_op(chip)) {
ret = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
len, true);
if (ret)
return ret;
ret = nand_status_op(chip, &status);
if (ret)
return ret;
} else {
chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
page);
chip->legacy.write_buf(chip, buf, len);
chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
ret = chip->legacy.waitfunc(chip);
if (ret < 0)
return ret;
status = ret;
}
if (status & NAND_STATUS_FAIL)
return -EIO;
return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_op);
/**
* nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
* @chip: The NAND chip
* @offset_in_page: offset within the page
* @buf: buffer containing the data to send to the NAND
* @len: length of the buffer
* @force_8bit: force 8-bit bus access
*
* This function issues a CHANGE WRITE COLUMN operation.
* This function does not select/unselect the CS line.
*
* Returns 0 on success, a negative error code otherwise.
*/
int nand_change_write_column_op(struct nand_chip *chip,
unsigned int offset_in_page,
const void *buf, unsigned int len,
bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (len && !buf)
return -EINVAL;
if (offset_in_page + len > mtd->writesize + mtd->oobsize)
return -EINVAL;
/* Small page NANDs do not support column change. */
if (mtd->writesize <= 512)
return -ENOTSUPP;
if (nand_has_exec_op(chip)) {
const struct nand_interface_config *conf =
nand_get_interface_config(chip);
u8 addrs[2];
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_RNDIN, 0),
NAND_OP_ADDR(2, addrs, NAND_COMMON_TIMING_NS(conf, tCCS_min)),
NAND_OP_DATA_OUT(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int ret;
ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
if (ret < 0)
return ret;
instrs[2].ctx.data.force_8bit = force_8bit;
/* Drop the DATA_OUT instruction if len is set to 0. */
if (!len)
op.ninstrs--;
return nand_exec_op(chip, &op);
}
chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
if (len)
chip->legacy.write_buf(chip, buf, len);
return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);
/**
* nand_readid_op - Do a READID operation
* @chip: The NAND chip
* @addr: address cycle to pass after the READID command
* @buf: buffer used to store the ID
* @len: length of the buffer
*
* This function sends a READID command and reads back the ID returned by the
* NAND.
* This function does not select/unselect the CS line.
*
* Returns 0 on success, a negative error code otherwise.
*/
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
unsigned int len)
{
unsigned int i;
u8 *id = buf, *ddrbuf = NULL;
if (len && !buf)
return -EINVAL;
if (nand_has_exec_op(chip)) {
const struct nand_interface_config *conf =
nand_get_interface_config(chip);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_READID, 0),
NAND_OP_ADDR(1, &addr,
NAND_COMMON_TIMING_NS(conf, tADL_min)),
NAND_OP_8BIT_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int ret;
/* READ_ID data bytes are received twice in NV-DDR mode */
if (len && nand_interface_is_nvddr(conf)) {
ddrbuf = kzalloc(len * 2, GFP_KERNEL);
if (!ddrbuf)
return -ENOMEM;
instrs[2].ctx.data.len *= 2;
instrs[2].ctx.data.buf.in = ddrbuf;
}
/* Drop the DATA_IN instruction if len is set to 0. */
if (!len)
op.ninstrs--;
ret = nand_exec_op(chip, &op);
if (!ret && len && nand_interface_is_nvddr(conf)) {
for (i = 0; i < len; i++)
id[i] = ddrbuf[i * 2];
}
kfree(ddrbuf);
return ret;
}
chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
for (i = 0; i < len; i++)
id[i] = chip->legacy.read_byte(chip);
return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);
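/*
 * Illustrative sketch (not part of this file): reading the two standard ID
 * bytes (manufacturer and device). Address 0x00 is the usual READID
 * address; die 0 and the function name are assumptions of this example.
 */
static int example_read_id(struct nand_chip *chip, u8 id[2])
{
	int ret;

	nand_select_target(chip, 0);
	ret = nand_readid_op(chip, 0x00, id, 2);
	nand_deselect_target(chip);

	return ret;
}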
/**
* nand_status_op - Do a STATUS operation
* @chip: The NAND chip
* @status: out variable to store the NAND status
*
* This function sends a STATUS command and reads back the status returned by
* the NAND.
* This function does not select/unselect the CS line.
*
* Returns 0 on success, a negative error code otherwise.
*/
int nand_status_op(struct nand_chip *chip, u8 *status)
{
if (nand_has_exec_op(chip)) {
const struct nand_interface_config *conf =
nand_get_interface_config(chip);
u8 ddrstatus[2];
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_STATUS,
NAND_COMMON_TIMING_NS(conf, tADL_min)),
NAND_OP_8BIT_DATA_IN(1, status, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int ret;
/* The status data byte will be received twice in NV-DDR mode */
if (status && nand_interface_is_nvddr(conf)) {
instrs[1].ctx.data.len *= 2;
instrs[1].ctx.data.buf.in = ddrstatus;
}
if (!status)
op.ninstrs--;
ret = nand_exec_op(chip, &op);
if (!ret && status && nand_interface_is_nvddr(conf))
*status = ddrstatus[0];
return ret;
}
chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
if (status)
*status = chip->legacy.read_byte(chip);
return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);
/**
* nand_exit_status_op - Exit a STATUS operation
* @chip: The NAND chip
*
* This function sends a READ0 command to cancel the effect of the STATUS
* command to avoid reading only the status until a new read command is sent.
*
* This function does not select/unselect the CS line.
*
* Returns 0 on success, a negative error code otherwise.
*/
int nand_exit_status_op(struct nand_chip *chip)
{
if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_READ0, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
return 0;
}
EXPORT_SYMBOL_GPL(nand_exit_status_op);
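/*
 * Illustrative sketch (not part of this file): sample the status register,
 * then cancel the STATUS state so that subsequent reads return array data
 * again. Die 0 and the function name are assumptions of this example.
 */
static int example_chip_is_ready(struct nand_chip *chip, bool *ready)
{
	u8 status;
	int ret;

	nand_select_target(chip, 0);
	ret = nand_status_op(chip, &status);
	if (!ret) {
		*ready = status & NAND_STATUS_READY;
		ret = nand_exit_status_op(chip);
	}
	nand_deselect_target(chip);

	return ret;
}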
/**
* nand_erase_op - Do an erase operation
* @chip: The NAND chip
* @eraseblock: block to erase
*
* This function sends an ERASE command and waits for the NAND to be ready
* before returning.
* This function does not select/unselect the CS line.
*
* Returns 0 on success, a negative error code otherwise.
*/
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
{
unsigned int page = eraseblock <<
(chip->phys_erase_shift - chip->page_shift);
int ret;
u8 status;
if (nand_has_exec_op(chip)) {
const struct nand_interface_config *conf =
nand_get_interface_config(chip);
u8 addrs[3] = { page, page >> 8, page >> 16 };
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_ERASE1, 0),
NAND_OP_ADDR(2, addrs, 0),
NAND_OP_CMD(NAND_CMD_ERASE2,
NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
if (chip->options & NAND_ROW_ADDR_3)
instrs[1].ctx.addr.naddrs++;
ret = nand_exec_op(chip, &op);
if (ret)
return ret;
ret = nand_status_op(chip, &status);
if (ret)
return ret;
} else {
chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
ret = chip->legacy.waitfunc(chip);
if (ret < 0)
return ret;
status = ret;
}
if (status & NAND_STATUS_FAIL)
return -EIO;
return 0;
}
EXPORT_SYMBOL_GPL(nand_erase_op);
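/*
 * Illustrative sketch (not part of this file): erasing the block containing
 * a given MTD offset. The eraseblock index is the offset shifted down by the
 * block size, which nand_erase_op() converts back into a page address. Die 0
 * and the function name are assumptions of this example.
 */
static int example_erase_block_at(struct nand_chip *chip, loff_t offs)
{
	unsigned int eraseblock = offs >> chip->phys_erase_shift;
	int ret;

	nand_select_target(chip, 0);
	ret = nand_erase_op(chip, eraseblock);
	nand_deselect_target(chip);

	return ret;
}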
/**
* nand_set_features_op - Do a SET FEATURES operation
* @chip: The NAND chip
* @feature: feature id
* @data: 4 bytes of data
*
* This function sends a SET FEATURES command and waits for the NAND to be
* ready before returning.
* This function does not select/unselect the CS line.
*
* Returns 0 on success, a negative error code otherwise.
*/
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
const void *data)
{
const u8 *params = data;
int i, ret;
if (nand_has_exec_op(chip)) {
const struct nand_interface_config *conf =
nand_get_interface_config(chip);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
NAND_OP_ADDR(1, &feature, NAND_COMMON_TIMING_NS(conf,
tADL_min)),
NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
NAND_COMMON_TIMING_NS(conf,
tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
chip->legacy.write_byte(chip, params[i]);
ret = chip->legacy.waitfunc(chip);
if (ret < 0)
return ret;
if (ret & NAND_STATUS_FAIL)
return -EIO;
return 0;
}
/**
* nand_get_features_op - Do a GET FEATURES operation
* @chip: The NAND chip
* @feature: feature id
* @data: 4 bytes of data
*
* This function sends a GET FEATURES command and waits for the NAND to be
* ready before returning.
* This function does not select/unselect the CS line.
*
* Returns 0 on success, a negative error code otherwise.
*/
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
void *data)
{
u8 *params = data, ddrbuf[ONFI_SUBFEATURE_PARAM_LEN * 2];
int i;
if (nand_has_exec_op(chip)) {
const struct nand_interface_config *conf =
nand_get_interface_config(chip);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
NAND_OP_ADDR(1, &feature,
NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
NAND_COMMON_TIMING_NS(conf, tRR_min)),
NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
data, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int ret;
/* GET_FEATURE data bytes are received twice in NV-DDR mode */
if (nand_interface_is_nvddr(conf)) {
instrs[3].ctx.data.len *= 2;
instrs[3].ctx.data.buf.in = ddrbuf;
}
ret = nand_exec_op(chip, &op);
if (nand_interface_is_nvddr(conf)) {
for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; i++)
params[i] = ddrbuf[i * 2];
}
return ret;
}
chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
params[i] = chip->legacy.read_byte(chip);
return 0;
}
static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
unsigned int delay_ns)
{
if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
PSEC_TO_NSEC(delay_ns)),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
/* Apply delay or wait for ready/busy pin */
if (!chip->legacy.dev_ready)
udelay(chip->legacy.chip_delay);
else
nand_wait_ready(chip);
return 0;
}
/**
* nand_reset_op - Do a reset operation
* @chip: The NAND chip
*
* This function sends a RESET command and waits for the NAND to be ready
* before returning.
* This function does not select/unselect the CS line.
*
* Returns 0 on success, a negative error code otherwise.
*/
int nand_reset_op(struct nand_chip *chip)
{
if (nand_has_exec_op(chip)) {
const struct nand_interface_config *conf =
nand_get_interface_config(chip);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_RESET,
NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tRST_max),
0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
return 0;
}
EXPORT_SYMBOL_GPL(nand_reset_op);
/**
* nand_read_data_op - Read data from the NAND
* @chip: The NAND chip
* @buf: buffer used to store the data
* @len: length of the buffer
* @force_8bit: force 8-bit bus access
 * @check_only: do not actually run the command, only check if the
 *              controller driver supports it
*
* This function does a raw data read on the bus. Usually used after launching
* another NAND operation like nand_read_page_op().
* This function does not select/unselect the CS line.
*
* Returns 0 on success, a negative error code otherwise.
*/
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
bool force_8bit, bool check_only)
{
if (!len || !buf)
return -EINVAL;
if (nand_has_exec_op(chip)) {
const struct nand_interface_config *conf =
nand_get_interface_config(chip);
struct nand_op_instr instrs[] = {
NAND_OP_DATA_IN(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
u8 *ddrbuf = NULL;
int ret, i;
instrs[0].ctx.data.force_8bit = force_8bit;
/*
		 * Parameter payloads (ID, status, features, etc.) do not go
		 * through the same pipeline as regular data, hence the
		 * force_8bit flag must be set. It also indicates that, when
		 * NV-DDR timings are in use, the data will be received twice.
*/
if (force_8bit && nand_interface_is_nvddr(conf)) {
ddrbuf = kzalloc(len * 2, GFP_KERNEL);
if (!ddrbuf)
return -ENOMEM;
instrs[0].ctx.data.len *= 2;
instrs[0].ctx.data.buf.in = ddrbuf;
}
if (check_only) {
ret = nand_check_op(chip, &op);
kfree(ddrbuf);
return ret;
}
ret = nand_exec_op(chip, &op);
if (!ret && force_8bit && nand_interface_is_nvddr(conf)) {
u8 *dst = buf;
for (i = 0; i < len; i++)
dst[i] = ddrbuf[i * 2];
}
kfree(ddrbuf);
return ret;
}
if (check_only)
return 0;
if (force_8bit) {
u8 *p = buf;
unsigned int i;
for (i = 0; i < len; i++)
p[i] = chip->legacy.read_byte(chip);
} else {
chip->legacy.read_buf(chip, buf, len);
}
return 0;
}
EXPORT_SYMBOL_GPL(nand_read_data_op);
/**
 * nand_write_data_op - Write data to the NAND
* @chip: The NAND chip
* @buf: buffer containing the data to send on the bus
* @len: length of the buffer
* @force_8bit: force 8-bit bus access
*
* This function does a raw data write on the bus. Usually used after launching
* another NAND operation like nand_write_page_begin_op().
* This function does not select/unselect the CS line.
*
* Returns 0 on success, a negative error code otherwise.
*/
int nand_write_data_op(struct nand_chip *chip, const void *buf,
unsigned int len, bool force_8bit)
{
if (!len || !buf)
return -EINVAL;
if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_DATA_OUT(len, buf, 0),
};
struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
instrs[0].ctx.data.force_8bit = force_8bit;
return nand_exec_op(chip, &op);
}
if (force_8bit) {
const u8 *p = buf;
unsigned int i;
for (i = 0; i < len; i++)
chip->legacy.write_byte(chip, p[i]);
} else {
chip->legacy.write_buf(chip, buf, len);
}
return 0;
}
EXPORT_SYMBOL_GPL(nand_write_data_op);
/**
* struct nand_op_parser_ctx - Context used by the parser
* @instrs: array of all the instructions that must be addressed
* @ninstrs: length of the @instrs array
* @subop: Sub-operation to be passed to the NAND controller
*
* This structure is used by the core to split NAND operations into
* sub-operations that can be handled by the NAND controller.
*/
struct nand_op_parser_ctx {
const struct nand_op_instr *instrs;
unsigned int ninstrs;
struct nand_subop subop;
};
/**
* nand_op_parser_must_split_instr - Checks if an instruction must be split
* @pat: the parser pattern element that matches @instr
* @instr: pointer to the instruction to check
* @start_offset: this is an in/out parameter. If @instr has already been
* split, then @start_offset is the offset from which to start
* (either an address cycle or an offset in the data buffer).
* Conversely, if the function returns true (ie. instr must be
* split), this parameter is updated to point to the first
* data/address cycle that has not been taken care of.
*
 * Some NAND controllers are limited and cannot send X address cycles in a
 * single operation, or cannot read/write more than Y bytes at the same time.
* In this case, split the instruction that does not fit in a single
* controller-operation into two or more chunks.
*
* Returns true if the instruction must be split, false otherwise.
* The @start_offset parameter is also updated to the offset at which the next
* bundle of instruction must start (if an address or a data instruction).
*/
static bool
nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
const struct nand_op_instr *instr,
unsigned int *start_offset)
{
switch (pat->type) {
case NAND_OP_ADDR_INSTR:
if (!pat->ctx.addr.maxcycles)
break;
if (instr->ctx.addr.naddrs - *start_offset >
pat->ctx.addr.maxcycles) {
*start_offset += pat->ctx.addr.maxcycles;
return true;
}
break;
case NAND_OP_DATA_IN_INSTR:
case NAND_OP_DATA_OUT_INSTR:
if (!pat->ctx.data.maxlen)
break;
if (instr->ctx.data.len - *start_offset >
pat->ctx.data.maxlen) {
*start_offset += pat->ctx.data.maxlen;
return true;
}
break;
default:
break;
}
return false;
}
/**
* nand_op_parser_match_pat - Checks if a pattern matches the instructions
* remaining in the parser context
* @pat: the pattern to test
* @ctx: the parser context structure to match with the pattern @pat
*
* Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
 * Returns true if this is the case, false otherwise. When true is returned,
* @ctx->subop is updated with the set of instructions to be passed to the
* controller driver.
*/
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
struct nand_op_parser_ctx *ctx)
{
unsigned int instr_offset = ctx->subop.first_instr_start_off;
const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
const struct nand_op_instr *instr = ctx->subop.instrs;
unsigned int i, ninstrs;
for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
/*
* The pattern instruction does not match the operation
* instruction. If the instruction is marked optional in the
* pattern definition, we skip the pattern element and continue
* to the next one. If the element is mandatory, there's no
* match and we can return false directly.
*/
if (instr->type != pat->elems[i].type) {
if (!pat->elems[i].optional)
return false;
continue;
}
/*
* Now check the pattern element constraints. If the pattern is
* not able to handle the whole instruction in a single step,
* we have to split it.
* The last_instr_end_off value comes back updated to point to
* the position where we have to split the instruction (the
* start of the next subop chunk).
*/
if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
&instr_offset)) {
ninstrs++;
i++;
break;
}
instr++;
ninstrs++;
instr_offset = 0;
}
/*
* This can happen if all instructions of a pattern are optional.
* Still, if there's not at least one instruction handled by this
* pattern, this is not a match, and we should try the next one (if
* any).
*/
if (!ninstrs)
return false;
/*
* We had a match on the pattern head, but the pattern may be longer
* than the instructions we're asked to execute. We need to make sure
 * there are no mandatory elements in the pattern tail.
*/
for (; i < pat->nelems; i++) {
if (!pat->elems[i].optional)
return false;
}
/*
* We have a match: update the subop structure accordingly and return
* true.
*/
ctx->subop.ninstrs = ninstrs;
ctx->subop.last_instr_end_off = instr_offset;
return true;
}
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
const struct nand_op_instr *instr;
char *prefix = " ";
unsigned int i;
pr_debug("executing subop (CS%d):\n", ctx->subop.cs);
for (i = 0; i < ctx->ninstrs; i++) {
instr = &ctx->instrs[i];
if (instr == &ctx->subop.instrs[0])
prefix = " ->";
nand_op_trace(prefix, instr);
if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
prefix = " ";
}
}
#else
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
/* NOP */
}
#endif
static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
const struct nand_op_parser_ctx *b)
{
if (a->subop.ninstrs < b->subop.ninstrs)
return -1;
else if (a->subop.ninstrs > b->subop.ninstrs)
return 1;
if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
return -1;
else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
return 1;
return 0;
}
/**
* nand_op_parser_exec_op - exec_op parser
* @chip: the NAND chip
* @parser: patterns description provided by the controller driver
* @op: the NAND operation to address
* @check_only: when true, the function only checks if @op can be handled but
* does not execute the operation
*
* Helper function designed to ease integration of NAND controller drivers that
* only support a limited set of instruction sequences. The supported sequences
* are described in @parser, and the framework takes care of splitting @op into
 * multiple sub-operations (if required) and passing them back to the ->exec()
* callback of the matching pattern if @check_only is set to false.
*
* NAND controller drivers should call this function from their own ->exec_op()
* implementation.
*
* Returns 0 on success, a negative error code otherwise. A failure can be
* caused by an unsupported operation (none of the supported patterns is able
 * to handle the requested operation), or by an error returned by the
 * matching pattern->exec() hook.
*/
int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_op_parser *parser,
const struct nand_operation *op, bool check_only)
{
struct nand_op_parser_ctx ctx = {
.subop.cs = op->cs,
.subop.instrs = op->instrs,
.instrs = op->instrs,
.ninstrs = op->ninstrs,
};
unsigned int i;
while (ctx.subop.instrs < op->instrs + op->ninstrs) {
const struct nand_op_parser_pattern *pattern;
struct nand_op_parser_ctx best_ctx;
int ret, best_pattern = -1;
for (i = 0; i < parser->npatterns; i++) {
struct nand_op_parser_ctx test_ctx = ctx;
pattern = &parser->patterns[i];
if (!nand_op_parser_match_pat(pattern, &test_ctx))
continue;
if (best_pattern >= 0 &&
nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
continue;
best_pattern = i;
best_ctx = test_ctx;
}
if (best_pattern < 0) {
pr_debug("->exec_op() parser: pattern not found!\n");
return -ENOTSUPP;
}
ctx = best_ctx;
nand_op_parser_trace(&ctx);
if (!check_only) {
pattern = &parser->patterns[best_pattern];
ret = pattern->exec(chip, &ctx.subop);
if (ret)
return ret;
}
/*
* Update the context structure by pointing to the start of the
* next subop.
*/
ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
if (ctx.subop.last_instr_end_off)
ctx.subop.instrs -= 1;
ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
}
return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
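/*
 * Illustrative sketch (not part of this file): a controller driver declares
 * the instruction sequences it supports in a parser table and calls
 * nand_op_parser_exec_op() from its ->exec_op() hook. All names are
 * hypothetical, and the 5-cycle/8192-byte limits are arbitrary controller
 * constraints; this single pattern only covers read-like sequences.
 */
static int example_exec_subop(struct nand_chip *chip,
			      const struct nand_subop *subop)
{
	/* Program the controller to issue the sub-operation here. */
	return 0;
}

static const struct nand_op_parser example_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(example_exec_subop,
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
			       NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 8192)));

static int example_exec_op(struct nand_chip *chip,
			   const struct nand_operation *op, bool check_only)
{
	return nand_op_parser_exec_op(chip, &example_op_parser, op,
				      check_only);
}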
static bool nand_instr_is_data(const struct nand_op_instr *instr)
{
return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
instr->type == NAND_OP_DATA_OUT_INSTR);
}
static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
unsigned int instr_idx)
{
return subop && instr_idx < subop->ninstrs;
}
static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
unsigned int instr_idx)
{
if (instr_idx)
return 0;
return subop->first_instr_start_off;
}
/**
* nand_subop_get_addr_start_off - Get the start offset in an address array
* @subop: The entire sub-operation
* @instr_idx: Index of the instruction inside the sub-operation
*
* During driver development, one could be tempted to directly use the
* ->addr.addrs field of address instructions. This is wrong as address
* instructions might be split.
*
* Given an address instruction, returns the offset of the first cycle to issue.
*/
unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
unsigned int instr_idx)
{
if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
return 0;
return nand_subop_get_start_off(subop, instr_idx);
}
EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
/**
* nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
* @subop: The entire sub-operation
* @instr_idx: Index of the instruction inside the sub-operation
*
* During driver development, one could be tempted to directly use the
 * ->addr->naddrs field of an address instruction. This is wrong as instructions
* might be split.
*
* Given an address instruction, returns the number of address cycle to issue.
*/
unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
unsigned int instr_idx)
{
int start_off, end_off;
if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
return 0;
start_off = nand_subop_get_addr_start_off(subop, instr_idx);
if (instr_idx == subop->ninstrs - 1 &&
subop->last_instr_end_off)
end_off = subop->last_instr_end_off;
else
end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
return end_off - start_off;
}
EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
/**
* nand_subop_get_data_start_off - Get the start offset in a data array
* @subop: The entire sub-operation
* @instr_idx: Index of the instruction inside the sub-operation
*
* During driver development, one could be tempted to directly use the
* ->data->buf.{in,out} field of data instructions. This is wrong as data
* instructions might be split.
*
* Given a data instruction, returns the offset to start from.
*/
unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
unsigned int instr_idx)
{
if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
!nand_instr_is_data(&subop->instrs[instr_idx])))
return 0;
return nand_subop_get_start_off(subop, instr_idx);
}
EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
/**
* nand_subop_get_data_len - Get the number of bytes to retrieve
* @subop: The entire sub-operation
* @instr_idx: Index of the instruction inside the sub-operation
*
* During driver development, one could be tempted to directly use the
* ->data->len field of a data instruction. This is wrong as data instructions
* might be split.
*
* Returns the length of the chunk of data to send/receive.
*/
unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
unsigned int instr_idx)
{
int start_off = 0, end_off;
if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
!nand_instr_is_data(&subop->instrs[instr_idx])))
return 0;
start_off = nand_subop_get_data_start_off(subop, instr_idx);
if (instr_idx == subop->ninstrs - 1 &&
subop->last_instr_end_off)
end_off = subop->last_instr_end_off;
else
end_off = subop->instrs[instr_idx].ctx.data.len;
return end_off - start_off;
}
EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
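/*
 * Illustrative sketch (not part of this file): inside a pattern ->exec()
 * hook, always go through the four helpers above instead of dereferencing
 * the instruction fields directly, since instructions may have been split.
 * The function name is hypothetical and the pr_debug() calls stand in for
 * real controller programming.
 */
static int example_issue_subop(struct nand_chip *chip,
			       const struct nand_subop *subop)
{
	unsigned int i;

	for (i = 0; i < subop->ninstrs; i++) {
		const struct nand_op_instr *instr = &subop->instrs[i];

		switch (instr->type) {
		case NAND_OP_ADDR_INSTR:
			pr_debug("%u address cycles from offset %u\n",
				 nand_subop_get_num_addr_cyc(subop, i),
				 nand_subop_get_addr_start_off(subop, i));
			break;
		case NAND_OP_DATA_IN_INSTR:
		case NAND_OP_DATA_OUT_INSTR:
			pr_debug("%u data bytes from offset %u\n",
				 nand_subop_get_data_len(subop, i),
				 nand_subop_get_data_start_off(subop, i));
			break;
		default:
			break;
		}
	}

	return 0;
}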
/**
* nand_reset - Reset and initialize a NAND device
* @chip: The NAND chip
* @chipnr: Internal die id
*
* Save the timings data structure, then apply SDR timings mode 0 (see
 * nand_reset_interface for details), do the reset operation, and restore
 * the previous timings.
*
* Returns 0 on success, a negative error code otherwise.
*/
int nand_reset(struct nand_chip *chip, int chipnr)
{
int ret;
ret = nand_reset_interface(chip, chipnr);
if (ret)
return ret;
/*
* The CS line has to be released before we can apply the new NAND
	 * interface settings, hence this weird nand_select_target() /
	 * nand_deselect_target() dance.
*/
nand_select_target(chip, chipnr);
ret = nand_reset_op(chip);
nand_deselect_target(chip);
if (ret)
return ret;
ret = nand_setup_interface(chip, chipnr);
if (ret)
return ret;
return 0;
}
EXPORT_SYMBOL_GPL(nand_reset);
/**
* nand_get_features - wrapper to perform a GET_FEATURE
* @chip: NAND chip info structure
* @addr: feature address
* @subfeature_param: the subfeature parameters, a four bytes array
*
* Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
* operation cannot be handled.
*/
int nand_get_features(struct nand_chip *chip, int addr,
u8 *subfeature_param)
{
if (!nand_supports_get_features(chip, addr))
return -ENOTSUPP;
if (chip->legacy.get_features)
return chip->legacy.get_features(chip, addr, subfeature_param);
return nand_get_features_op(chip, addr, subfeature_param);
}
/**
* nand_set_features - wrapper to perform a SET_FEATURE
* @chip: NAND chip info structure
* @addr: feature address
* @subfeature_param: the subfeature parameters, a four bytes array
*
* Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
* operation cannot be handled.
*/
int nand_set_features(struct nand_chip *chip, int addr,
u8 *subfeature_param)
{
if (!nand_supports_set_features(chip, addr))
return -ENOTSUPP;
if (chip->legacy.set_features)
return chip->legacy.set_features(chip, addr, subfeature_param);
return nand_set_features_op(chip, addr, subfeature_param);
}
/**
* nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
* @buf: buffer to test
* @len: buffer length
* @bitflips_threshold: maximum number of bitflips
*
* Check if a buffer contains only 0xff, which means the underlying region
* has been erased and is ready to be programmed.
 * The bitflips_threshold specifies the maximum number of bitflips before
 * considering the region as not erased.
 * Note: The logic of this function has been extracted from the memweight
 * implementation, except that nand_check_erased_buf exits before
 * testing the whole buffer if the number of bitflips exceeds the
 * bitflips_threshold value.
*
* Returns a positive number of bitflips less than or equal to
* bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
* threshold.
*/
static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
{
const unsigned char *bitmap = buf;
int bitflips = 0;
int weight;
for (; len && ((uintptr_t)bitmap) % sizeof(long);
len--, bitmap++) {
weight = hweight8(*bitmap);
bitflips += BITS_PER_BYTE - weight;
if (unlikely(bitflips > bitflips_threshold))
return -EBADMSG;
}
for (; len >= sizeof(long);
len -= sizeof(long), bitmap += sizeof(long)) {
unsigned long d = *((unsigned long *)bitmap);
if (d == ~0UL)
continue;
weight = hweight_long(d);
bitflips += BITS_PER_LONG - weight;
if (unlikely(bitflips > bitflips_threshold))
return -EBADMSG;
}
for (; len > 0; len--, bitmap++) {
weight = hweight8(*bitmap);
bitflips += BITS_PER_BYTE - weight;
if (unlikely(bitflips > bitflips_threshold))
return -EBADMSG;
}
return bitflips;
}
/**
* nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
* 0xff data
* @data: data buffer to test
* @datalen: data length
* @ecc: ECC buffer
* @ecclen: ECC length
* @extraoob: extra OOB buffer
* @extraooblen: extra OOB length
* @bitflips_threshold: maximum number of bitflips
*
* Check if a data buffer and its associated ECC and OOB data contains only
* 0xff pattern, which means the underlying region has been erased and is
* ready to be programmed.
 * The bitflips_threshold specifies the maximum number of bitflips before
* considering the region as not erased.
*
* Note:
* 1/ ECC algorithms are working on pre-defined block sizes which are usually
* different from the NAND page size. When fixing bitflips, ECC engines will
* report the number of errors per chunk, and the NAND core infrastructure
 * expects you to return the maximum number of bitflips for the whole page.
* This is why you should always use this function on a single chunk and
* not on the whole page. After checking each chunk you should update your
* max_bitflips value accordingly.
* 2/ When checking for bitflips in erased pages you should not only check
* the payload data but also their associated ECC data, because a user might
 * have programmed all but a few bits to 1. In this case, we
 * shouldn't consider the chunk as erased, and checking the ECC bytes
 * prevents this case from being missed.
* 3/ The extraoob argument is optional, and should be used if some of your OOB
* data are protected by the ECC engine.
* It could also be used if you support subpages and want to attach some
* extra OOB data to an ECC chunk.
*
* Returns a positive number of bitflips less than or equal to
* bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
* threshold. In case of success, the passed buffers are filled with 0xff.
*/
int nand_check_erased_ecc_chunk(void *data, int datalen,
void *ecc, int ecclen,
void *extraoob, int extraooblen,
int bitflips_threshold)
{
int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
data_bitflips = nand_check_erased_buf(data, datalen,
bitflips_threshold);
if (data_bitflips < 0)
return data_bitflips;
bitflips_threshold -= data_bitflips;
ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
if (ecc_bitflips < 0)
return ecc_bitflips;
bitflips_threshold -= ecc_bitflips;
extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
bitflips_threshold);
if (extraoob_bitflips < 0)
return extraoob_bitflips;
if (data_bitflips)
memset(data, 0xff, datalen);
if (ecc_bitflips)
memset(ecc, 0xff, ecclen);
if (extraoob_bitflips)
memset(extraoob, 0xff, extraooblen);
return data_bitflips + ecc_bitflips + extraoob_bitflips;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
/**
* nand_read_page_raw_notsupp - dummy read raw page function
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* Returns -ENOTSUPP unconditionally.
*/
int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
return -ENOTSUPP;
}
/**
* nand_read_page_raw - [INTERN] read raw page data without ecc
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (ret)
return ret;
if (oob_required) {
ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
false, false);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL(nand_read_page_raw);
/**
* nand_monolithic_read_page_raw - Monolithic page read in raw mode
* @chip: NAND chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* This is a raw page read, ie. without any error detection/correction.
 * Monolithic means we are requesting all the relevant data (main plus
 * possibly OOB) to be loaded in the NAND cache and sent over the
* bus (from the NAND chip to the NAND controller) in a single
* operation. This is an alternative to nand_read_page_raw(), which
* first reads the main data, and if the OOB data is requested too,
* then reads more data on the bus.
*/
int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int size = mtd->writesize;
u8 *read_buf = buf;
int ret;
if (oob_required) {
size += mtd->oobsize;
if (buf != chip->data_buf)
read_buf = nand_get_data_buf(chip);
}
ret = nand_read_page_op(chip, page, 0, read_buf, size);
if (ret)
return ret;
if (buf != chip->data_buf)
memcpy(buf, read_buf, mtd->writesize);
return 0;
}
EXPORT_SYMBOL(nand_monolithic_read_page_raw);
/**
* nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* We need a special oob layout and handling even when OOB isn't used.
*/
static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
uint8_t *oob = chip->oob_poi;
int steps, size, ret;
ret = nand_read_page_op(chip, page, 0, NULL, 0);
if (ret)
return ret;
for (steps = chip->ecc.steps; steps > 0; steps--) {
ret = nand_read_data_op(chip, buf, eccsize, false, false);
if (ret)
return ret;
buf += eccsize;
if (chip->ecc.prepad) {
ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
false, false);
if (ret)
return ret;
oob += chip->ecc.prepad;
}
ret = nand_read_data_op(chip, oob, eccbytes, false, false);
if (ret)
return ret;
oob += eccbytes;
if (chip->ecc.postpad) {
ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
false, false);
if (ret)
return ret;
oob += chip->ecc.postpad;
}
}
size = mtd->oobsize - (oob - chip->oob_poi);
if (size) {
ret = nand_read_data_op(chip, oob, size, false, false);
if (ret)
return ret;
}
return 0;
}
/**
* nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*/
static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
uint8_t *ecc_calc = chip->ecc.calc_buf;
uint8_t *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0;
chip->ecc.read_page_raw(chip, buf, 1, page);
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
chip->ecc.calculate(chip, p, &ecc_calc[i]);
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
eccsteps = chip->ecc.steps;
p = buf;
for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
if (stat < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += stat;
max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
}
return max_bitflips;
}
/**
* nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
* @chip: nand chip info structure
* @data_offs: offset of requested data within the page
* @readlen: data length
* @bufpoi: buffer to store read data
* @page: page number to read
*/
static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
uint32_t readlen, uint8_t *bufpoi, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int start_step, end_step, num_steps, ret;
uint8_t *p;
int data_col_addr, i, gaps = 0;
int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
int index, section = 0;
unsigned int max_bitflips = 0;
struct mtd_oob_region oobregion = { };
	/* Column address within the page aligned to ECC size (e.g. 256 bytes) */
start_step = data_offs / chip->ecc.size;
end_step = (data_offs + readlen - 1) / chip->ecc.size;
num_steps = end_step - start_step + 1;
index = start_step * chip->ecc.bytes;
/* Data size aligned to ECC ecc.size */
datafrag_len = num_steps * chip->ecc.size;
eccfrag_len = num_steps * chip->ecc.bytes;
data_col_addr = start_step * chip->ecc.size;
	/* The read may start at a non page-aligned column */
p = bufpoi + data_col_addr;
ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
if (ret)
return ret;
/* Calculate ECC */
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
/*
* The performance is faster if we position offsets according to
* ecc.pos. Let's make sure that there are no gaps in ECC positions.
*/
	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
if (ret)
return ret;
if (oobregion.length < eccfrag_len)
gaps = 1;
if (gaps) {
ret = nand_change_read_column_op(chip, mtd->writesize,
chip->oob_poi, mtd->oobsize,
false);
if (ret)
return ret;
} else {
/*
		 * Send the command to read the particular ECC bytes, taking
		 * care of buswidth alignment in read_buf.
*/
aligned_pos = oobregion.offset & ~(busw - 1);
aligned_len = eccfrag_len;
if (oobregion.offset & (busw - 1))
aligned_len++;
if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
(busw - 1))
aligned_len++;
ret = nand_change_read_column_op(chip,
mtd->writesize + aligned_pos,
&chip->oob_poi[aligned_pos],
aligned_len, false);
if (ret)
return ret;
}
ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
chip->oob_poi, index, eccfrag_len);
if (ret)
return ret;
p = bufpoi + data_col_addr;
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
int stat;
stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
&chip->ecc.calc_buf[i]);
if (stat == -EBADMSG &&
(chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
/* check for empty pages with bitflips */
stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
&chip->ecc.code_buf[i],
chip->ecc.bytes,
NULL, 0,
chip->ecc.strength);
}
if (stat < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += stat;
max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
}
return max_bitflips;
}
/**
* nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* Not for syndrome calculating ECC controllers, which need a special oob layout.
*/
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
uint8_t *ecc_calc = chip->ecc.calc_buf;
uint8_t *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0;
ret = nand_read_page_op(chip, page, 0, NULL, 0);
if (ret)
return ret;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(chip, NAND_ECC_READ);
ret = nand_read_data_op(chip, p, eccsize, false, false);
if (ret)
return ret;
chip->ecc.calculate(chip, p, &ecc_calc[i]);
}
ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
false);
if (ret)
return ret;
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
eccsteps = chip->ecc.steps;
p = buf;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
if (stat == -EBADMSG &&
(chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
/* check for empty pages with bitflips */
stat = nand_check_erased_ecc_chunk(p, eccsize,
&ecc_code[i], eccbytes,
NULL, 0,
chip->ecc.strength);
}
if (stat < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += stat;
max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
}
return max_bitflips;
}
/**
* nand_read_page_hwecc_oob_first - Hardware ECC page read with ECC
* data read from OOB area
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* Hardware ECC for large page chips, which requires the ECC data to be
* extracted from the OOB before the actual data is read.
*/
int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
uint8_t *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0;
/* Read the OOB area first */
ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
if (ret)
return ret;
/* Move read cursor to start of page */
ret = nand_change_read_column_op(chip, 0, NULL, 0, false);
if (ret)
return ret;
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
chip->ecc.hwctl(chip, NAND_ECC_READ);
ret = nand_read_data_op(chip, p, eccsize, false, false);
if (ret)
return ret;
stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
if (stat == -EBADMSG &&
(chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
/* check for empty pages with bitflips */
stat = nand_check_erased_ecc_chunk(p, eccsize,
&ecc_code[i],
eccbytes, NULL, 0,
chip->ecc.strength);
}
if (stat < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += stat;
max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
}
return max_bitflips;
}
EXPORT_SYMBOL_GPL(nand_read_page_hwecc_oob_first);
/**
* nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* The hw generator calculates the error syndrome automatically. Therefore we
* need a special oob layout and handling.
*/
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret, i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
uint8_t *p = buf;
uint8_t *oob = chip->oob_poi;
unsigned int max_bitflips = 0;
ret = nand_read_page_op(chip, page, 0, NULL, 0);
if (ret)
return ret;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
chip->ecc.hwctl(chip, NAND_ECC_READ);
ret = nand_read_data_op(chip, p, eccsize, false, false);
if (ret)
return ret;
if (chip->ecc.prepad) {
ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
false, false);
if (ret)
return ret;
oob += chip->ecc.prepad;
}
chip->ecc.hwctl(chip, NAND_ECC_READSYN);
ret = nand_read_data_op(chip, oob, eccbytes, false, false);
if (ret)
return ret;
stat = chip->ecc.correct(chip, p, oob, NULL);
oob += eccbytes;
if (chip->ecc.postpad) {
ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
false, false);
if (ret)
return ret;
oob += chip->ecc.postpad;
}
if (stat == -EBADMSG &&
(chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
/* check for empty pages with bitflips */
stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
oob - eccpadbytes,
eccpadbytes,
NULL, 0,
chip->ecc.strength);
}
if (stat < 0) {
mtd->ecc_stats.failed++;
} else {
mtd->ecc_stats.corrected += stat;
max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
}
/* Calculate remaining oob bytes */
i = mtd->oobsize - (oob - chip->oob_poi);
if (i) {
ret = nand_read_data_op(chip, oob, i, false, false);
if (ret)
return ret;
}
return max_bitflips;
}
/**
* nand_transfer_oob - [INTERN] Transfer oob to client buffer
* @chip: NAND chip object
* @oob: oob destination address
* @ops: oob ops structure
* @len: size of oob to transfer
*/
static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
struct mtd_oob_ops *ops, size_t len)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
case MTD_OPS_RAW:
memcpy(oob, chip->oob_poi + ops->ooboffs, len);
return oob + len;
case MTD_OPS_AUTO_OOB:
ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
ops->ooboffs, len);
BUG_ON(ret);
return oob + len;
default:
BUG();
}
return NULL;
}
static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page,
u32 readlen, int col)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (!chip->controller->supported_op.cont_read)
return;
if ((col && col + readlen < (3 * mtd->writesize)) ||
(!col && readlen < (2 * mtd->writesize))) {
chip->cont_read.ongoing = false;
return;
}
chip->cont_read.ongoing = true;
chip->cont_read.first_page = page;
if (col)
chip->cont_read.first_page++;
chip->cont_read.last_page = page + ((readlen >> chip->page_shift) & chip->pagemask);
}
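/*
 * Worked example (assuming mtd->writesize = 4096): a page-aligned read of
 * 8192 bytes satisfies readlen >= 2 * writesize, so the sequential cache
 * read stays enabled and the bookkeeping above spans pages
 * [page, page + 2]. A read starting at col = 100 is only accepted when
 * col + readlen reaches 3 * writesize = 12288, and its partial first page
 * is excluded (first_page = page + 1) because only whole pages can be
 * part of the continuous burst.
 */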
/**
* nand_setup_read_retry - [INTERN] Set the READ RETRY mode
* @chip: NAND chip object
* @retry_mode: the retry mode to use
*
* Some vendors supply a special command to shift the Vt threshold, to be used
* when there are too many bitflips in a page (i.e., ECC error). After setting
* a new threshold, the host should retry reading the page.
*/
static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
pr_debug("setting READ RETRY mode %d\n", retry_mode);
if (retry_mode >= chip->read_retries)
return -EINVAL;
if (!chip->ops.setup_read_retry)
return -EOPNOTSUPP;
return chip->ops.setup_read_retry(chip, retry_mode);
}
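/*
 * Illustrative sketch (not part of the driver) of the caller pattern used
 * by the read path below: step through the vendor retry modes until the
 * page reads back clean, then restore mode 0 for normal operation.
 *
 *	for (mode = 1; mode < chip->read_retries; mode++) {
 *		if (nand_setup_read_retry(chip, mode) < 0)
 *			break;
 *		// re-read the page here; stop iterating on success
 *	}
 *	nand_setup_read_retry(chip, 0);
 */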
static void nand_wait_readrdy(struct nand_chip *chip)
{
const struct nand_interface_config *conf;
if (!(chip->options & NAND_NEED_READRDY))
return;
conf = nand_get_interface_config(chip);
WARN_ON(nand_wait_rdy_op(chip, NAND_COMMON_TIMING_MS(conf, tR_max), 0));
}
/**
* nand_do_read_ops - [INTERN] Read data with ECC
* @chip: NAND chip object
* @from: offset to read from
* @ops: oob ops structure
*
* Internal function. Called with chip held.
*/
static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
struct mtd_oob_ops *ops)
{
int chipnr, page, realpage, col, bytes, aligned, oob_required;
struct mtd_info *mtd = nand_to_mtd(chip);
int ret = 0;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
uint32_t max_oobsize = mtd_oobavail(mtd, ops);
uint8_t *bufpoi, *oob, *buf;
int use_bounce_buf;
unsigned int max_bitflips = 0;
int retry_mode = 0;
bool ecc_fail = false;
/* Check if the region is secured */
if (nand_region_is_secured(chip, from, readlen))
return -EIO;
chipnr = (int)(from >> chip->chip_shift);
nand_select_target(chip, chipnr);
realpage = (int)(from >> chip->page_shift);
page = realpage & chip->pagemask;
col = (int)(from & (mtd->writesize - 1));
buf = ops->datbuf;
oob = ops->oobbuf;
oob_required = oob ? 1 : 0;
rawnand_enable_cont_reads(chip, page, readlen, col);
while (1) {
struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
bytes = min(mtd->writesize - col, readlen);
aligned = (bytes == mtd->writesize);
if (!aligned)
use_bounce_buf = 1;
else if (chip->options & NAND_USES_DMA)
use_bounce_buf = !virt_addr_valid(buf) ||
!IS_ALIGNED((unsigned long)buf,
chip->buf_align);
else
use_bounce_buf = 0;
/* Is the current page in the buffer? */
if (realpage != chip->pagecache.page || oob) {
bufpoi = use_bounce_buf ? chip->data_buf : buf;
if (use_bounce_buf && aligned)
pr_debug("%s: using read bounce buffer for buf@%p\n",
__func__, buf);
read_retry:
/*
* Now read the page into the buffer. Absent an error,
* the read methods return max bitflips per ecc step.
*/
if (unlikely(ops->mode == MTD_OPS_RAW))
ret = chip->ecc.read_page_raw(chip, bufpoi,
oob_required,
page);
else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
!oob)
ret = chip->ecc.read_subpage(chip, col, bytes,
bufpoi, page);
else
ret = chip->ecc.read_page(chip, bufpoi,
oob_required, page);
if (ret < 0) {
if (use_bounce_buf)
/* Invalidate page cache */
chip->pagecache.page = -1;
break;
}
/*
* Copy back the data in the initial buffer when reading
* partial pages or when a bounce buffer is required.
*/
if (use_bounce_buf) {
if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
!(mtd->ecc_stats.failed - ecc_stats.failed) &&
(ops->mode != MTD_OPS_RAW)) {
chip->pagecache.page = realpage;
chip->pagecache.bitflips = ret;
} else {
/* Invalidate page cache */
chip->pagecache.page = -1;
}
memcpy(buf, bufpoi + col, bytes);
}
if (unlikely(oob)) {
int toread = min(oobreadlen, max_oobsize);
if (toread) {
oob = nand_transfer_oob(chip, oob, ops,
toread);
oobreadlen -= toread;
}
}
nand_wait_readrdy(chip);
if (mtd->ecc_stats.failed - ecc_stats.failed) {
if (retry_mode + 1 < chip->read_retries) {
retry_mode++;
ret = nand_setup_read_retry(chip,
retry_mode);
if (ret < 0)
break;
/* Reset ecc_stats; retry */
mtd->ecc_stats = ecc_stats;
goto read_retry;
} else {
/* No more retry modes; real failure */
ecc_fail = true;
}
}
buf += bytes;
max_bitflips = max_t(unsigned int, max_bitflips, ret);
} else {
memcpy(buf, chip->data_buf + col, bytes);
buf += bytes;
max_bitflips = max_t(unsigned int, max_bitflips,
chip->pagecache.bitflips);
}
readlen -= bytes;
/* Reset to retry mode 0 */
if (retry_mode) {
ret = nand_setup_read_retry(chip, 0);
if (ret < 0)
break;
retry_mode = 0;
}
if (!readlen)
break;
/* For subsequent reads align to page boundary */
col = 0;
/* Increment page address */
realpage++;
page = realpage & chip->pagemask;
/* Check, if we cross a chip boundary */
if (!page) {
chipnr++;
nand_deselect_target(chip);
nand_select_target(chip, chipnr);
}
}
nand_deselect_target(chip);
ops->retlen = ops->len - (size_t) readlen;
if (oob)
ops->oobretlen = ops->ooblen - oobreadlen;
if (ret < 0)
return ret;
if (ecc_fail)
return -EBADMSG;
return max_bitflips;
}
/**
* nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
* @chip: nand chip info structure
* @page: page number to read
*/
int nand_read_oob_std(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
EXPORT_SYMBOL(nand_read_oob_std);
/**
* nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
* with syndromes
* @chip: nand chip info structure
* @page: page number to read
*/
static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int length = mtd->oobsize;
int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
int eccsize = chip->ecc.size;
uint8_t *bufpoi = chip->oob_poi;
int i, toread, sndrnd = 0, pos, ret;
ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
if (ret)
return ret;
for (i = 0; i < chip->ecc.steps; i++) {
if (sndrnd) {
int ret;
pos = eccsize + i * (eccsize + chunk);
if (mtd->writesize > 512)
ret = nand_change_read_column_op(chip, pos,
NULL, 0,
false);
else
ret = nand_read_page_op(chip, page, pos, NULL,
0);
if (ret)
return ret;
} else
sndrnd = 1;
toread = min_t(int, length, chunk);
ret = nand_read_data_op(chip, bufpoi, toread, false, false);
if (ret)
return ret;
bufpoi += toread;
length -= toread;
}
if (length > 0) {
ret = nand_read_data_op(chip, bufpoi, length, false, false);
if (ret)
return ret;
}
return 0;
}
/**
* nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
* @chip: nand chip info structure
* @page: page number to write
*/
int nand_write_oob_std(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
mtd->oobsize);
}
EXPORT_SYMBOL(nand_write_oob_std);
/**
* nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
* with syndrome - only for large page flash
* @chip: nand chip info structure
* @page: page number to write
*/
static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
int eccsize = chip->ecc.size, length = mtd->oobsize;
int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
const uint8_t *bufpoi = chip->oob_poi;
/*
* data-ecc-data-ecc ... ecc-oob
* or
* data-pad-ecc-pad-data-pad .... ecc-pad-oob
*/
if (!chip->ecc.prepad && !chip->ecc.postpad) {
pos = steps * (eccsize + chunk);
steps = 0;
} else
pos = eccsize;
ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
if (ret)
return ret;
for (i = 0; i < steps; i++) {
if (sndcmd) {
if (mtd->writesize <= 512) {
uint32_t fill = 0xFFFFFFFF;
len = eccsize;
while (len > 0) {
int num = min_t(int, len, 4);
ret = nand_write_data_op(chip, &fill,
num, false);
if (ret)
return ret;
len -= num;
}
} else {
pos = eccsize + i * (eccsize + chunk);
ret = nand_change_write_column_op(chip, pos,
NULL, 0,
false);
if (ret)
return ret;
}
} else
sndcmd = 1;
len = min_t(int, length, chunk);
ret = nand_write_data_op(chip, bufpoi, len, false);
if (ret)
return ret;
bufpoi += len;
length -= len;
}
if (length > 0) {
ret = nand_write_data_op(chip, bufpoi, length, false);
if (ret)
return ret;
}
return nand_prog_page_end_op(chip);
}
/**
* nand_do_read_oob - [INTERN] NAND read out-of-band
* @chip: NAND chip object
* @from: offset to read from
* @ops: oob operations description structure
*
* NAND read out-of-band data from the spare area.
*/
static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
struct mtd_oob_ops *ops)
{
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int max_bitflips = 0;
int page, realpage, chipnr;
struct mtd_ecc_stats stats;
int readlen = ops->ooblen;
int len;
uint8_t *buf = ops->oobbuf;
int ret = 0;
pr_debug("%s: from = 0x%08Lx, len = %i\n",
__func__, (unsigned long long)from, readlen);
/* Check if the region is secured */
if (nand_region_is_secured(chip, from, readlen))
return -EIO;
stats = mtd->ecc_stats;
len = mtd_oobavail(mtd, ops);
chipnr = (int)(from >> chip->chip_shift);
nand_select_target(chip, chipnr);
/* Shift to get page */
realpage = (int)(from >> chip->page_shift);
page = realpage & chip->pagemask;
while (1) {
if (ops->mode == MTD_OPS_RAW)
ret = chip->ecc.read_oob_raw(chip, page);
else
ret = chip->ecc.read_oob(chip, page);
if (ret < 0)
break;
len = min(len, readlen);
buf = nand_transfer_oob(chip, buf, ops, len);
nand_wait_readrdy(chip);
max_bitflips = max_t(unsigned int, max_bitflips, ret);
readlen -= len;
if (!readlen)
break;
/* Increment page address */
realpage++;
page = realpage & chip->pagemask;
/* Check, if we cross a chip boundary */
if (!page) {
chipnr++;
nand_deselect_target(chip);
nand_select_target(chip, chipnr);
}
}
nand_deselect_target(chip);
ops->oobretlen = ops->ooblen - readlen;
if (ret < 0)
return ret;
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
return max_bitflips;
}
/**
* nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
* @mtd: MTD device structure
* @from: offset to read from
* @ops: oob operation description structure
*
* NAND read data and/or out-of-band data.
*/
static int nand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mtd_ecc_stats old_stats;
int ret;
ops->retlen = 0;
if (ops->mode != MTD_OPS_PLACE_OOB &&
ops->mode != MTD_OPS_AUTO_OOB &&
ops->mode != MTD_OPS_RAW)
return -ENOTSUPP;
nand_get_device(chip);
old_stats = mtd->ecc_stats;
if (!ops->datbuf)
ret = nand_do_read_oob(chip, from, ops);
else
ret = nand_do_read_ops(chip, from, ops);
if (ops->stats) {
ops->stats->uncorrectable_errors +=
mtd->ecc_stats.failed - old_stats.failed;
ops->stats->corrected_bitflips +=
mtd->ecc_stats.corrected - old_stats.corrected;
}
nand_release_device(chip);
return ret;
}
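/*
 * Illustrative sketch (hypothetical buffer sizes, not part of the driver):
 * a caller reaches this entry point through the generic MTD API, e.g.
 *
 *	struct mtd_oob_ops ops = { };
 *	u8 data[4096], oob[64];
 *
 *	ops.mode = MTD_OPS_AUTO_OOB;
 *	ops.datbuf = data;
 *	ops.len = sizeof(data);
 *	ops.oobbuf = oob;
 *	ops.ooblen = sizeof(oob);
 *	ret = mtd_read_oob(mtd, 0, &ops);
 *
 * With a data buffer attached, the request is served by nand_do_read_ops();
 * with ops.datbuf = NULL it would go through nand_do_read_oob() instead.
 */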
/**
* nand_write_page_raw_notsupp - dummy raw page write function
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
*
* Returns -ENOTSUPP unconditionally.
*/
int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
return -ENOTSUPP;
}
/**
* nand_write_page_raw - [INTERN] raw page write function
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
*
* Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
if (ret)
return ret;
if (oob_required) {
ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
false);
if (ret)
return ret;
}
return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_page_raw);
/**
* nand_monolithic_write_page_raw - Monolithic page write in raw mode
* @chip: NAND chip info structure
* @buf: data buffer to write
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
*
* This is a raw page write, i.e. without any error detection/correction.
* Monolithic means we are requesting all the relevant data (main plus,
* optionally, OOB) to be sent over the bus and effectively programmed
* into the NAND chip arrays in a single operation. This is an
* alternative to nand_write_page_raw(), which first sends the main
* data, then optionally sends the OOB data by latching more data
* cycles on the NAND bus, and finally sends the program command to
* synchronize the NAND chip cache.
*/
int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int size = mtd->writesize;
u8 *write_buf = (u8 *)buf;
if (oob_required) {
size += mtd->oobsize;
if (buf != chip->data_buf) {
write_buf = nand_get_data_buf(chip);
memcpy(write_buf, buf, mtd->writesize);
}
}
return nand_prog_page_op(chip, page, 0, write_buf, size);
}
EXPORT_SYMBOL(nand_monolithic_write_page_raw);
/**
* nand_write_page_raw_syndrome - [INTERN] raw page write function
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
*
* We need a special oob layout and handling even when ECC isn't checked.
*/
static int nand_write_page_raw_syndrome(struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
uint8_t *oob = chip->oob_poi;
int steps, size, ret;
ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
if (ret)
return ret;
for (steps = chip->ecc.steps; steps > 0; steps--) {
ret = nand_write_data_op(chip, buf, eccsize, false);
if (ret)
return ret;
buf += eccsize;
if (chip->ecc.prepad) {
ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
false);
if (ret)
return ret;
oob += chip->ecc.prepad;
}
ret = nand_write_data_op(chip, oob, eccbytes, false);
if (ret)
return ret;
oob += eccbytes;
if (chip->ecc.postpad) {
ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
false);
if (ret)
return ret;
oob += chip->ecc.postpad;
}
}
size = mtd->oobsize - (oob - chip->oob_poi);
if (size) {
ret = nand_write_data_op(chip, oob, size, false);
if (ret)
return ret;
}
return nand_prog_page_end_op(chip);
}
/**
* nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
*/
static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *ecc_calc = chip->ecc.calc_buf;
const uint8_t *p = buf;
/* Software ECC calculation */
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
chip->ecc.calculate(chip, p, &ecc_calc[i]);
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
return chip->ecc.write_page_raw(chip, buf, 1, page);
}
/**
* nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
*/
static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *ecc_calc = chip->ecc.calc_buf;
const uint8_t *p = buf;
ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
if (ret)
return ret;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(chip, NAND_ECC_WRITE);
ret = nand_write_data_op(chip, p, eccsize, false);
if (ret)
return ret;
chip->ecc.calculate(chip, p, &ecc_calc[i]);
}
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
if (ret)
return ret;
return nand_prog_page_end_op(chip);
}
/**
* nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
* @chip: nand chip info structure
* @offset: column address of subpage within the page
* @data_len: data length
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
*/
static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
uint32_t data_len, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
uint8_t *oob_buf = chip->oob_poi;
uint8_t *ecc_calc = chip->ecc.calc_buf;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
int ecc_steps = chip->ecc.steps;
uint32_t start_step = offset / ecc_size;
uint32_t end_step = (offset + data_len - 1) / ecc_size;
int oob_bytes = mtd->oobsize / ecc_steps;
int step, ret;
ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
if (ret)
return ret;
for (step = 0; step < ecc_steps; step++) {
/* configure controller for WRITE access */
chip->ecc.hwctl(chip, NAND_ECC_WRITE);
/* write data (untouched subpages already masked by 0xFF) */
ret = nand_write_data_op(chip, buf, ecc_size, false);
if (ret)
return ret;
/* mask ECC of un-touched subpages by padding 0xFF */
if ((step < start_step) || (step > end_step))
memset(ecc_calc, 0xff, ecc_bytes);
else
chip->ecc.calculate(chip, buf, ecc_calc);
/* mask OOB of un-touched subpages by padding 0xFF */
/* if oob_required, preserve OOB metadata of written subpage */
if (!oob_required || (step < start_step) || (step > end_step))
memset(oob_buf, 0xff, oob_bytes);
buf += ecc_size;
ecc_calc += ecc_bytes;
oob_buf += oob_bytes;
}
/* copy the calculated ECC for the whole page to chip->oob_poi */
/* this includes the masked value (0xFF) for unwritten subpages */
ecc_calc = chip->ecc.calc_buf;
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
/* write OOB buffer to NAND device */
ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
if (ret)
return ret;
return nand_prog_page_end_op(chip);
}
/**
* nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
*
* The hw generator calculates the error syndrome automatically. Therefore we
* need a special oob layout and handling.
*/
static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
const uint8_t *p = buf;
uint8_t *oob = chip->oob_poi;
int ret;
ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
if (ret)
return ret;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(chip, NAND_ECC_WRITE);
ret = nand_write_data_op(chip, p, eccsize, false);
if (ret)
return ret;
if (chip->ecc.prepad) {
ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
false);
if (ret)
return ret;
oob += chip->ecc.prepad;
}
chip->ecc.calculate(chip, p, oob);
ret = nand_write_data_op(chip, oob, eccbytes, false);
if (ret)
return ret;
oob += eccbytes;
if (chip->ecc.postpad) {
ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
false);
if (ret)
return ret;
oob += chip->ecc.postpad;
}
}
/* Calculate remaining oob bytes */
i = mtd->oobsize - (oob - chip->oob_poi);
if (i) {
ret = nand_write_data_op(chip, oob, i, false);
if (ret)
return ret;
}
return nand_prog_page_end_op(chip);
}
/**
* nand_write_page - write one page
* @chip: NAND chip descriptor
* @offset: address offset within the page
* @data_len: length of actual data to be written
* @buf: the data to write
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
* @raw: use _raw version of write_page
*/
static int nand_write_page(struct nand_chip *chip, uint32_t offset,
int data_len, const uint8_t *buf, int oob_required,
int page, int raw)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int status, subpage;
if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
chip->ecc.write_subpage)
subpage = offset || (data_len < mtd->writesize);
else
subpage = 0;
if (unlikely(raw))
status = chip->ecc.write_page_raw(chip, buf, oob_required,
page);
else if (subpage)
status = chip->ecc.write_subpage(chip, offset, data_len, buf,
oob_required, page);
else
status = chip->ecc.write_page(chip, buf, oob_required, page);
if (status < 0)
return status;
return 0;
}
#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
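/*
 * Worked example (assuming chip->subpagesize = 512): NOTALIGNED(1024)
 * evaluates to 0 (aligned) while NOTALIGNED(1300) is non-zero because
 * 1300 & 511 = 276. In nand_write_page() above, a request with
 * offset = 512 and data_len = 512 on a 2048-byte page therefore takes the
 * ecc.write_subpage() path (provided the chip supports subpage writes),
 * while a full-page write uses ecc.write_page().
 */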
/**
* nand_do_write_ops - [INTERN] NAND write with ECC
* @chip: NAND chip object
* @to: offset to write to
* @ops: oob operations description structure
*
* NAND write with ECC.
*/
static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
struct mtd_oob_ops *ops)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int chipnr, realpage, page, column;
uint32_t writelen = ops->len;
uint32_t oobwritelen = ops->ooblen;
uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
uint8_t *oob = ops->oobbuf;
uint8_t *buf = ops->datbuf;
int ret;
int oob_required = oob ? 1 : 0;
ops->retlen = 0;
if (!writelen)
return 0;
/* Reject writes that are not page aligned */
if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
pr_notice("%s: attempt to write non page aligned data\n",
__func__);
return -EINVAL;
}
/* Check if the region is secured */
if (nand_region_is_secured(chip, to, writelen))
return -EIO;
column = to & (mtd->writesize - 1);
chipnr = (int)(to >> chip->chip_shift);
nand_select_target(chip, chipnr);
/* Check, if it is write protected */
if (nand_check_wp(chip)) {
ret = -EIO;
goto err_out;
}
realpage = (int)(to >> chip->page_shift);
page = realpage & chip->pagemask;
/* Invalidate the page cache, when we write to the cached page */
if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
chip->pagecache.page = -1;
/* Don't allow multipage oob writes with offset */
if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
ret = -EINVAL;
goto err_out;
}
while (1) {
int bytes = mtd->writesize;
uint8_t *wbuf = buf;
int use_bounce_buf;
int part_pagewr = (column || writelen < mtd->writesize);
if (part_pagewr)
use_bounce_buf = 1;
else if (chip->options & NAND_USES_DMA)
use_bounce_buf = !virt_addr_valid(buf) ||
!IS_ALIGNED((unsigned long)buf,
chip->buf_align);
else
use_bounce_buf = 0;
/*
* Copy the data from the initial buffer when doing partial page
* writes or when a bounce buffer is required.
*/
if (use_bounce_buf) {
pr_debug("%s: using write bounce buffer for buf@%p\n",
__func__, buf);
if (part_pagewr)
bytes = min_t(int, bytes - column, writelen);
wbuf = nand_get_data_buf(chip);
memset(wbuf, 0xff, mtd->writesize);
memcpy(&wbuf[column], buf, bytes);
}
if (unlikely(oob)) {
size_t len = min(oobwritelen, oobmaxlen);
oob = nand_fill_oob(chip, oob, len, ops);
oobwritelen -= len;
} else {
/* We still need to erase leftover OOB data */
memset(chip->oob_poi, 0xff, mtd->oobsize);
}
ret = nand_write_page(chip, column, bytes, wbuf,
oob_required, page,
(ops->mode == MTD_OPS_RAW));
if (ret)
break;
writelen -= bytes;
if (!writelen)
break;
column = 0;
buf += bytes;
realpage++;
page = realpage & chip->pagemask;
/* Check, if we cross a chip boundary */
if (!page) {
chipnr++;
nand_deselect_target(chip);
nand_select_target(chip, chipnr);
}
}
ops->retlen = ops->len - writelen;
if (unlikely(oob))
ops->oobretlen = ops->ooblen;
err_out:
nand_deselect_target(chip);
return ret;
}
/**
* panic_nand_write - [MTD Interface] NAND write with ECC
* @mtd: MTD device structure
* @to: offset to write to
* @len: number of bytes to write
* @retlen: pointer to variable to store the number of written bytes
* @buf: the data to write
*
* NAND write with ECC. Used when performing writes in interrupt context, this
* may for example be called by mtdoops when writing an oops while in panic.
*/
static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf)
{
struct nand_chip *chip = mtd_to_nand(mtd);
int chipnr = (int)(to >> chip->chip_shift);
struct mtd_oob_ops ops;
int ret;
nand_select_target(chip, chipnr);
/* Wait for the device to get ready */
panic_nand_wait(chip, 400);
memset(&ops, 0, sizeof(ops));
ops.len = len;
ops.datbuf = (uint8_t *)buf;
ops.mode = MTD_OPS_PLACE_OOB;
ret = nand_do_write_ops(chip, to, &ops);
*retlen = ops.retlen;
return ret;
}
/**
* nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
* @mtd: MTD device structure
* @to: offset to write to
* @ops: oob operation description structure
*/
static int nand_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
struct nand_chip *chip = mtd_to_nand(mtd);
int ret = 0;
ops->retlen = 0;
nand_get_device(chip);
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
case MTD_OPS_AUTO_OOB:
case MTD_OPS_RAW:
break;
default:
goto out;
}
if (!ops->datbuf)
ret = nand_do_write_oob(chip, to, ops);
else
ret = nand_do_write_ops(chip, to, ops);
out:
nand_release_device(chip);
return ret;
}
/**
* nand_erase - [MTD Interface] erase block(s)
* @mtd: MTD device structure
* @instr: erase instruction
*
* Erase one or more blocks.
*/
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
}
/**
* nand_erase_nand - [INTERN] erase block(s)
* @chip: NAND chip object
* @instr: erase instruction
* @allowbbt: allow erasing the bbt area
*
* Erase one or more blocks.
*/
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
int allowbbt)
{
int page, pages_per_block, ret, chipnr;
loff_t len;
pr_debug("%s: start = 0x%012llx, len = %llu\n",
__func__, (unsigned long long)instr->addr,
(unsigned long long)instr->len);
if (check_offs_len(chip, instr->addr, instr->len))
return -EINVAL;
/* Check if the region is secured */
if (nand_region_is_secured(chip, instr->addr, instr->len))
return -EIO;
/* Grab the lock and see if the device is available */
nand_get_device(chip);
/* Shift to get first page */
page = (int)(instr->addr >> chip->page_shift);
chipnr = (int)(instr->addr >> chip->chip_shift);
/* Calculate pages in each block */
pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
/* Select the NAND device */
nand_select_target(chip, chipnr);
/* Check, if it is write protected */
if (nand_check_wp(chip)) {
pr_debug("%s: device is write protected!\n",
__func__);
ret = -EIO;
goto erase_exit;
}
/* Loop through the pages */
len = instr->len;
while (len) {
loff_t ofs = (loff_t)page << chip->page_shift;
/* Check if we have a bad block, we do not erase bad blocks! */
if (nand_block_checkbad(chip, ((loff_t) page) <<
chip->page_shift, allowbbt)) {
pr_warn("%s: attempt to erase a bad block at 0x%08llx\n",
__func__, (unsigned long long)ofs);
ret = -EIO;
goto erase_exit;
}
/*
* Invalidate the page cache, if we erase the block which
* contains the current cached page.
*/
if (page <= chip->pagecache.page && chip->pagecache.page <
(page + pages_per_block))
chip->pagecache.page = -1;
ret = nand_erase_op(chip, (page & chip->pagemask) >>
(chip->phys_erase_shift - chip->page_shift));
if (ret) {
pr_debug("%s: failed erase, page 0x%08x\n",
__func__, page);
instr->fail_addr = ofs;
goto erase_exit;
}
/* Increment page address and decrement length */
len -= (1ULL << chip->phys_erase_shift);
page += pages_per_block;
/* Check, if we cross a chip boundary */
if (len && !(page & chip->pagemask)) {
chipnr++;
nand_deselect_target(chip);
nand_select_target(chip, chipnr);
}
}
ret = 0;
erase_exit:
/* Deselect and wake up anyone waiting on the device */
nand_deselect_target(chip);
nand_release_device(chip);
/* Return more or less happy */
return ret;
}
/**
* nand_sync - [MTD Interface] sync
* @mtd: MTD device structure
*
* Sync is actually just a wait for the chip to become ready.
*/
static void nand_sync(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
pr_debug("%s: called\n", __func__);
/* Grab the lock and see if the device is available */
nand_get_device(chip);
/* Release it and go back */
nand_release_device(chip);
}
/**
* nand_block_isbad - [MTD Interface] Check if block at offset is bad
* @mtd: MTD device structure
* @offs: offset relative to mtd start
*/
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
struct nand_chip *chip = mtd_to_nand(mtd);
int chipnr = (int)(offs >> chip->chip_shift);
int ret;
/* Select the NAND device */
nand_get_device(chip);
nand_select_target(chip, chipnr);
ret = nand_block_checkbad(chip, offs, 0);
nand_deselect_target(chip);
nand_release_device(chip);
return ret;
}
/**
* nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
* @mtd: MTD device structure
* @ofs: offset relative to mtd start
*/
static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
int ret;
ret = nand_block_isbad(mtd, ofs);
if (ret) {
/* If it was bad already, return success and do nothing */
if (ret > 0)
return 0;
return ret;
}
return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
}
/**
* nand_suspend - [MTD Interface] Suspend the NAND flash
* @mtd: MTD device structure
*
* Returns 0 for success or negative error code otherwise.
*/
static int nand_suspend(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
int ret = 0;
mutex_lock(&chip->lock);
if (chip->ops.suspend)
ret = chip->ops.suspend(chip);
if (!ret)
chip->suspended = 1;
mutex_unlock(&chip->lock);
return ret;
}
/**
* nand_resume - [MTD Interface] Resume the NAND flash
* @mtd: MTD device structure
*/
static void nand_resume(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
mutex_lock(&chip->lock);
if (chip->suspended) {
if (chip->ops.resume)
chip->ops.resume(chip);
chip->suspended = 0;
} else {
pr_err("%s called for a chip which is not in suspended state\n",
__func__);
}
mutex_unlock(&chip->lock);
wake_up_all(&chip->resume_wq);
}
/**
* nand_shutdown - [MTD Interface] Finish the current NAND operation and
* prevent further operations
* @mtd: MTD device structure
*/
static void nand_shutdown(struct mtd_info *mtd)
{
nand_suspend(mtd);
}
/**
* nand_lock - [MTD Interface] Lock the NAND flash
* @mtd: MTD device structure
* @ofs: offset byte address
* @len: number of bytes to lock (must be a multiple of block/page size)
*/
static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (!chip->ops.lock_area)
return -ENOTSUPP;
return chip->ops.lock_area(chip, ofs, len);
}
/**
* nand_unlock - [MTD Interface] Unlock the NAND flash
* @mtd: MTD device structure
* @ofs: offset byte address
* @len: number of bytes to unlock (must be a multiple of block/page size)
*/
static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (!chip->ops.unlock_area)
return -ENOTSUPP;
return chip->ops.unlock_area(chip, ofs, len);
}
/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
{
/* If no controller is provided, use the dummy, legacy one. */
if (!chip->controller) {
chip->controller = &chip->legacy.dummy_controller;
nand_controller_init(chip->controller);
}
nand_legacy_set_defaults(chip);
if (!chip->buf_align)
chip->buf_align = 1;
}
/* Sanitize ONFI strings so we can safely print them */
void sanitize_string(uint8_t *s, size_t len)
{
ssize_t i;
/* Null terminate */
s[len - 1] = 0;
/* Remove non printable chars */
for (i = 0; i < len - 1; i++) {
if (s[i] < ' ' || s[i] > 127)
s[i] = '?';
}
/* Remove trailing spaces */
strim(s);
}
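/*
 * Worked example (hypothetical input): for the byte sequence
 * { 'M', 'o', 'd', 'e', 'l', 0x06, 'X', ' ', ' ', 0 } with len = 10, the
 * last byte is forced to NUL, the unprintable 0x06 becomes '?', and
 * strim() drops the trailing spaces, leaving "Model?X". Bytes above 127
 * (e.g. 0xFF) are replaced the same way.
 */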
/*
* nand_id_has_period - Check if an ID string has a given wraparound period
* @id_data: the ID string
* @arrlen: the length of the @id_data array
* @period: the period of repetition
*
* Check if an ID string is repeated within a given sequence of bytes at
* specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
* period of 3). This is a helper function for nand_id_len(). Returns non-zero
* if the repetition has a period of @period; otherwise, returns zero.
*/
static int nand_id_has_period(u8 *id_data, int arrlen, int period)
{
int i, j;
for (i = 0; i < period; i++)
for (j = i + period; j < arrlen; j += period)
if (id_data[i] != id_data[j])
return 0;
return 1;
}
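/*
 * Worked example: for id_data = { 0x20, 0x01, 0x7F, 0x20 } and arrlen = 4,
 * period 1 fails at once (0x20 != 0x01) and period 2 fails as well
 * (0x20 != 0x7F), but period 3 holds: the only in-range comparison is
 * id_data[0] == id_data[3] (0x20 == 0x20), so the function returns 1.
 */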
/*
* nand_id_len - Get the length of an ID string returned by CMD_READID
* @id_data: the ID string
* @arrlen: the length of the @id_data array
* Returns the length of the ID string, according to known wraparound/trailing
* zero patterns. If no pattern exists, returns the length of the array.
*/
static int nand_id_len(u8 *id_data, int arrlen)
{
int last_nonzero, period;
/* Find last non-zero byte */
for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
if (id_data[last_nonzero])
break;
/* All zeros */
if (last_nonzero < 0)
return 0;
/* Calculate wraparound period */
for (period = 1; period < arrlen; period++)
if (nand_id_has_period(id_data, arrlen, period))
break;
/* There's a repeated pattern */
if (period < arrlen)
return period;
/* There are trailing zeros */
if (last_nonzero < arrlen - 1)
return last_nonzero + 1;
/* No pattern detected */
return arrlen;
}
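/*
 * Worked example (hypothetical ID string): for
 * { 0xec, 0xd3, 0x51, 0x95, 0x58, 0xec, 0xd3, 0x51 } with arrlen = 8, the
 * last non-zero byte is at index 7 and the smallest wraparound period that
 * holds is 5 (0xec/0xd3/0x51 repeat five bytes apart), so nand_id_len()
 * returns 5. An all-zero buffer would return 0, and a buffer with neither
 * repetition nor trailing zeros returns arrlen.
 */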
/* Extract the number of bits per cell from the 3rd byte of the extended ID */
static int nand_get_bits_per_cell(u8 cellinfo)
{
int bits;
bits = cellinfo & NAND_CI_CELLTYPE_MSK;
bits >>= NAND_CI_CELLTYPE_SHIFT;
return bits + 1;
}
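/*
 * Worked example (assuming the usual NAND_CI_CELLTYPE_MSK of 0x0C and
 * NAND_CI_CELLTYPE_SHIFT of 2): a 3rd ID byte of 0x55 gives
 * (0x55 & 0x0C) >> 2 = 1, i.e. 2 bits per cell (MLC), while zero in those
 * bits decodes to 1 bit per cell (SLC).
 */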
/*
* Many newer NAND chips share similar device ID codes, which represent the size of the
* chip. The rest of the parameters must be decoded according to generic or
* manufacturer-specific "extended ID" decoding patterns.
*/
void nand_decode_ext_id(struct nand_chip *chip)
{
struct nand_memory_organization *memorg;
struct mtd_info *mtd = nand_to_mtd(chip);
int extid;
u8 *id_data = chip->id.data;
memorg = nanddev_get_memorg(&chip->base);
/* The 3rd id byte holds MLC / multichip data */
memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
/* The 4th id byte is the important one */
extid = id_data[3];
/* Calc pagesize */
memorg->pagesize = 1024 << (extid & 0x03);
mtd->writesize = memorg->pagesize;
extid >>= 2;
/* Calc oobsize */
memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
mtd->oobsize = memorg->oobsize;
extid >>= 2;
/* Calc blocksize. Blocksize is multiples of 64KiB */
memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
memorg->pagesize;
mtd->erasesize = (64 * 1024) << (extid & 0x03);
extid >>= 2;
/* Get buswidth information */
if (extid & 0x1)
chip->options |= NAND_BUSWIDTH_16;
}
EXPORT_SYMBOL_GPL(nand_decode_ext_id);
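/*
 * Worked example (hypothetical 4th ID byte of 0x95):
 *
 *	pagesize  = 1024 << (0x95 & 0x03)              = 2048
 *	extid >>= 2                                    -> 0x25
 *	oobsize   = (8 << (0x25 & 0x01)) * (2048 >> 9) = 64
 *	extid >>= 2                                    -> 0x09
 *	erasesize = (64 * 1024) << (0x09 & 0x03)       = 128 KiB
 *	extid >>= 2                                    -> 0x02
 *	bit 0 clear                                    -> 8-bit bus
 *
 * i.e. a 2 KiB page, 64-byte OOB, 128 KiB block, 8-bit device.
 */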
/*
* Old devices have chip data hardcoded in the device ID table. nand_decode_id
* decodes a matching ID table entry and assigns the MTD size parameters for
* the chip.
*/
static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
memorg = nanddev_get_memorg(&chip->base);
memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
mtd->erasesize = type->erasesize;
memorg->pagesize = type->pagesize;
mtd->writesize = memorg->pagesize;
memorg->oobsize = memorg->pagesize / 32;
mtd->oobsize = memorg->oobsize;
/* All legacy ID NAND are small-page, SLC */
memorg->bits_per_cell = 1;
}
/*
* Set the bad block marker/indicator (BBM/BBI) patterns according to some
* heuristic patterns using various detected parameters (e.g., manufacturer,
* page size, cell-type information).
*/
static void nand_decode_bbm_options(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
/* Set the bad block position */
if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
chip->badblockpos = NAND_BBM_POS_LARGE;
else
chip->badblockpos = NAND_BBM_POS_SMALL;
}
static inline bool is_full_id_nand(struct nand_flash_dev *type)
{
return type->id_len;
}
static bool find_full_id_nand(struct nand_chip *chip,
struct nand_flash_dev *type)
{
struct nand_device *base = &chip->base;
struct nand_ecc_props requirements;
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
u8 *id_data = chip->id.data;
memorg = nanddev_get_memorg(&chip->base);
if (!strncmp(type->id, id_data, type->id_len)) {
memorg->pagesize = type->pagesize;
mtd->writesize = memorg->pagesize;
memorg->pages_per_eraseblock = type->erasesize /
type->pagesize;
mtd->erasesize = type->erasesize;
memorg->oobsize = type->oobsize;
mtd->oobsize = memorg->oobsize;
memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
memorg->eraseblocks_per_lun =
DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
memorg->pagesize *
memorg->pages_per_eraseblock);
chip->options |= type->options;
requirements.strength = NAND_ECC_STRENGTH(type);
requirements.step_size = NAND_ECC_STEP(type);
nanddev_set_ecc_requirements(base, &requirements);
chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
if (!chip->parameters.model)
return false;
return true;
}
return false;
}
/*
* Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
* compliant and does not have a full-id or legacy-id entry in the nand_ids
* table.
*/
static void nand_manufacturer_detect(struct nand_chip *chip)
{
/*
* Try manufacturer detection if available and use
* nand_decode_ext_id() otherwise.
*/
if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
chip->manufacturer.desc->ops->detect) {
struct nand_memory_organization *memorg;
memorg = nanddev_get_memorg(&chip->base);
/* The 3rd id byte holds MLC / multichip data */
memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
chip->manufacturer.desc->ops->detect(chip);
} else {
nand_decode_ext_id(chip);
}
}
/*
* Manufacturer initialization. This function is called for all NANDs including
* ONFI and JEDEC compliant ones.
* Manufacturer drivers should put all their specific initialization code in
* their ->init() hook.
*/
static int nand_manufacturer_init(struct nand_chip *chip)
{
if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
!chip->manufacturer.desc->ops->init)
return 0;
return chip->manufacturer.desc->ops->init(chip);
}
/*
* Manufacturer cleanup. This function is called for all NANDs including
* ONFI and JEDEC compliant ones.
* Manufacturer drivers should put all their specific cleanup code in their
* ->cleanup() hook.
*/
static void nand_manufacturer_cleanup(struct nand_chip *chip)
{
/* Release manufacturer private data */
if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
chip->manufacturer.desc->ops->cleanup)
chip->manufacturer.desc->ops->cleanup(chip);
}
static const char *
nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
{
return manufacturer_desc ? manufacturer_desc->name : "Unknown";
}
static void rawnand_check_data_only_read_support(struct nand_chip *chip)
{
/* Use an arbitrary size for the check */
if (!nand_read_data_op(chip, NULL, SZ_512, true, true))
chip->controller->supported_op.data_only_read = 1;
}
static void rawnand_early_check_supported_ops(struct nand_chip *chip)
{
/* The supported_op fields should not be set by individual drivers */
WARN_ON_ONCE(chip->controller->supported_op.data_only_read);
if (!nand_has_exec_op(chip))
return;
rawnand_check_data_only_read_support(chip);
}
static void rawnand_check_cont_read_support(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
if (chip->read_retries)
return;
if (!nand_lp_exec_cont_read_page_op(chip, 0, 0, NULL,
mtd->writesize, true))
chip->controller->supported_op.cont_read = 1;
}
static void rawnand_late_check_supported_ops(struct nand_chip *chip)
{
/* The supported_op fields should not be set by individual drivers */
WARN_ON_ONCE(chip->controller->supported_op.cont_read);
if (!nand_has_exec_op(chip))
return;
rawnand_check_cont_read_support(chip);
}
/*
* Get the flash and manufacturer id and lookup if the type is supported.
*/
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
const struct nand_manufacturer_desc *manufacturer_desc;
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
int busw, ret;
u8 *id_data = chip->id.data;
u8 maf_id, dev_id;
u64 targetsize;
/*
* Let's start by initializing memorg fields that might be left
* unassigned by the ID-based detection logic.
*/
memorg = nanddev_get_memorg(&chip->base);
memorg->planes_per_lun = 1;
memorg->luns_per_target = 1;
/*
* Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
* after power-up.
*/
ret = nand_reset(chip, 0);
if (ret)
return ret;
/* Select the device */
nand_select_target(chip, 0);
rawnand_early_check_supported_ops(chip);
/* Send the command for reading device ID */
ret = nand_readid_op(chip, 0, id_data, 2);
if (ret)
return ret;
/* Read manufacturer and device IDs */
maf_id = id_data[0];
dev_id = id_data[1];
/*
* Try again to make sure, as on some systems the bus-hold or other
* interface concerns can cause random data which looks like a
* possibly credible NAND flash to appear. If the two results do
* not match, ignore the device completely.
*/
/* Read entire ID string */
ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
if (ret)
return ret;
if (id_data[0] != maf_id || id_data[1] != dev_id) {
pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
maf_id, dev_id, id_data[0], id_data[1]);
return -ENODEV;
}
chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
/* Try to identify manufacturer */
manufacturer_desc = nand_get_manufacturer_desc(maf_id);
chip->manufacturer.desc = manufacturer_desc;
if (!type)
type = nand_flash_ids;
/*
* Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
* override it.
* This is required to make sure initial NAND bus width set by the
* NAND controller driver is coherent with the real NAND bus width
* (extracted by auto-detection code).
*/
busw = chip->options & NAND_BUSWIDTH_16;
/*
* The flag is only set (never cleared), so reset it to its default value
* before starting auto-detection.
*/
chip->options &= ~NAND_BUSWIDTH_16;
for (; type->name != NULL; type++) {
if (is_full_id_nand(type)) {
if (find_full_id_nand(chip, type))
goto ident_done;
} else if (dev_id == type->dev_id) {
break;
}
}
if (!type->name || !type->pagesize) {
/* Check if the chip is ONFI compliant */
ret = nand_onfi_detect(chip);
if (ret < 0)
return ret;
else if (ret)
goto ident_done;
/* Check if the chip is JEDEC compliant */
ret = nand_jedec_detect(chip);
if (ret < 0)
return ret;
else if (ret)
goto ident_done;
}
if (!type->name)
return -ENODEV;
chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
if (!chip->parameters.model)
return -ENOMEM;
if (!type->pagesize)
nand_manufacturer_detect(chip);
else
nand_decode_id(chip, type);
/* Get chip options */
chip->options |= type->options;
memorg->eraseblocks_per_lun =
DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
memorg->pagesize *
memorg->pages_per_eraseblock);
ident_done:
if (!mtd->name)
mtd->name = chip->parameters.model;
if (chip->options & NAND_BUSWIDTH_AUTO) {
WARN_ON(busw & NAND_BUSWIDTH_16);
nand_set_defaults(chip);
} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
/*
* Check if the buswidth is correct. Hardware drivers should set the
* chip up correctly!
*/
pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
maf_id, dev_id);
pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
mtd->name);
pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
ret = -EINVAL;
goto free_detect_allocation;
}
nand_decode_bbm_options(chip);
/* Calculate the address shift from the page size */
chip->page_shift = ffs(mtd->writesize) - 1;
/* Convert chipsize to number of pages per chip -1 */
targetsize = nanddev_target_size(&chip->base);
chip->pagemask = (targetsize >> chip->page_shift) - 1;
chip->bbt_erase_shift = chip->phys_erase_shift =
ffs(mtd->erasesize) - 1;
if (targetsize & 0xffffffff)
chip->chip_shift = ffs((unsigned)targetsize) - 1;
else {
chip->chip_shift = ffs((unsigned)(targetsize >> 32));
chip->chip_shift += 32 - 1;
}
if (chip->chip_shift - chip->page_shift > 16)
chip->options |= NAND_ROW_ADDR_3;
chip->badblockbits = 8;
nand_legacy_adjust_cmdfunc(chip);
pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
maf_id, dev_id);
pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
chip->parameters.model);
pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
(int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
return 0;
free_detect_allocation:
kfree(chip->parameters.model);
return ret;
}
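/*
 * Worked example of the shift arithmetic above (hypothetical 256 MiB
 * target with 2 KiB pages and 128 KiB blocks):
 *
 *	page_shift       = ffs(2048) - 1          = 11
 *	pagemask         = (0x10000000 >> 11) - 1 = 0x1ffff
 *	phys_erase_shift = ffs(131072) - 1        = 17
 *	chip_shift       = ffs(0x10000000) - 1    = 28
 *
 * chip_shift - page_shift = 17 > 16, so NAND_ROW_ADDR_3 gets set and the
 * chip is addressed with three row-address cycles.
 */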
static enum nand_ecc_engine_type
of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
{
enum nand_ecc_legacy_mode {
NAND_ECC_INVALID,
NAND_ECC_NONE,
NAND_ECC_SOFT,
NAND_ECC_SOFT_BCH,
NAND_ECC_HW,
NAND_ECC_HW_SYNDROME,
NAND_ECC_ON_DIE,
};
const char * const nand_ecc_legacy_modes[] = {
[NAND_ECC_NONE] = "none",
[NAND_ECC_SOFT] = "soft",
[NAND_ECC_SOFT_BCH] = "soft_bch",
[NAND_ECC_HW] = "hw",
[NAND_ECC_HW_SYNDROME] = "hw_syndrome",
[NAND_ECC_ON_DIE] = "on-die",
};
enum nand_ecc_legacy_mode eng_type;
const char *pm;
int err;
err = of_property_read_string(np, "nand-ecc-mode", &pm);
if (err)
return NAND_ECC_ENGINE_TYPE_INVALID;
for (eng_type = NAND_ECC_NONE;
eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
switch (eng_type) {
case NAND_ECC_NONE:
return NAND_ECC_ENGINE_TYPE_NONE;
case NAND_ECC_SOFT:
case NAND_ECC_SOFT_BCH:
return NAND_ECC_ENGINE_TYPE_SOFT;
case NAND_ECC_HW:
case NAND_ECC_HW_SYNDROME:
return NAND_ECC_ENGINE_TYPE_ON_HOST;
case NAND_ECC_ON_DIE:
return NAND_ECC_ENGINE_TYPE_ON_DIE;
default:
break;
}
}
}
return NAND_ECC_ENGINE_TYPE_INVALID;
}
static enum nand_ecc_placement
of_get_rawnand_ecc_placement_legacy(struct device_node *np)
{
const char *pm;
int err;
err = of_property_read_string(np, "nand-ecc-mode", &pm);
if (!err) {
if (!strcasecmp(pm, "hw_syndrome"))
return NAND_ECC_PLACEMENT_INTERLEAVED;
}
return NAND_ECC_PLACEMENT_UNKNOWN;
}
static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np)
{
const char *pm;
int err;
err = of_property_read_string(np, "nand-ecc-mode", &pm);
if (!err) {
if (!strcasecmp(pm, "soft"))
return NAND_ECC_ALGO_HAMMING;
else if (!strcasecmp(pm, "soft_bch"))
return NAND_ECC_ALGO_BCH;
}
return NAND_ECC_ALGO_UNKNOWN;
}
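/*
 * Illustrative DT snippet (hypothetical node) for the legacy property these
 * helpers parse:
 *
 *	nand@0 {
 *		nand-ecc-mode = "soft_bch";
 *	};
 *
 * "soft_bch" maps to NAND_ECC_ENGINE_TYPE_SOFT with NAND_ECC_ALGO_BCH;
 * only "hw_syndrome" yields NAND_ECC_PLACEMENT_INTERLEAVED.
 */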
static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
{
struct device_node *dn = nand_get_flash_node(chip);
struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;
if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);
if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);
if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
}
static int of_get_nand_bus_width(struct nand_chip *chip)
{
struct device_node *dn = nand_get_flash_node(chip);
u32 val;
int ret;
ret = of_property_read_u32(dn, "nand-bus-width", &val);
if (ret == -EINVAL)
/* Buswidth defaults to 8 if the property does not exist. */
return 0;
else if (ret)
return ret;
if (val == 16)
chip->options |= NAND_BUSWIDTH_16;
else if (val != 8)
return -EINVAL;
return 0;
}
static int of_get_nand_secure_regions(struct nand_chip *chip)
{
struct device_node *dn = nand_get_flash_node(chip);
struct property *prop;
int nr_elem, i, j;
/* Only proceed if the "secure-regions" property is present in DT */
prop = of_find_property(dn, "secure-regions", NULL);
if (!prop)
return 0;
nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
if (nr_elem <= 0)
return nr_elem;
chip->nr_secure_regions = nr_elem / 2;
chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
GFP_KERNEL);
if (!chip->secure_regions)
return -ENOMEM;
for (i = 0, j = 0; i < chip->nr_secure_regions; i++, j += 2) {
of_property_read_u64_index(dn, "secure-regions", j,
&chip->secure_regions[i].offset);
of_property_read_u64_index(dn, "secure-regions", j + 1,
&chip->secure_regions[i].size);
}
return 0;
}
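/*
 * Illustrative DT snippet (hypothetical offsets/sizes): each secure region
 * is an (offset, size) pair of 64-bit values, so the four cells below
 * describe two regions of 1 MiB each:
 *
 *	secure-regions = /bits/ 64 <0x0      0x100000
 *				    0x200000 0x100000>;
 */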
/**
* rawnand_dt_parse_gpio_cs - Parse the gpio-cs property of a controller
* @dev: Device that will be parsed. Also used for managed allocations.
* @cs_array: Array of GPIO desc pointers allocated on success
* @ncs_array: Number of entries in @cs_array, updated on success
*
* Return: 0 on success, an error otherwise.
*/
int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
unsigned int *ncs_array)
{
struct gpio_desc **descs;
int ndescs, i;
ndescs = gpiod_count(dev, "cs");
if (ndescs < 0) {
dev_dbg(dev, "No valid cs-gpios property\n");
return 0;
}
descs = devm_kcalloc(dev, ndescs, sizeof(*descs), GFP_KERNEL);
if (!descs)
return -ENOMEM;
for (i = 0; i < ndescs; i++) {
descs[i] = gpiod_get_index_optional(dev, "cs", i,
GPIOD_OUT_HIGH);
if (IS_ERR(descs[i]))
return PTR_ERR(descs[i]);
}
*ncs_array = ndescs;
*cs_array = descs;
return 0;
}
EXPORT_SYMBOL(rawnand_dt_parse_gpio_cs);
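/*
 * Illustrative sketch (not from this file) of a controller probe using the
 * helper above to pick up GPIO chip-selects:
 *
 *	struct gpio_desc **cs_array;
 *	unsigned int ncs;
 *	int ret;
 *
 *	ret = rawnand_dt_parse_gpio_cs(&pdev->dev, &cs_array, &ncs);
 *	if (ret)
 *		return ret;
 *	// later, assert chip-select i by driving cs_array[i] low
 */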
static int rawnand_dt_init(struct nand_chip *chip)
{
struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
struct device_node *dn = nand_get_flash_node(chip);
int ret;
if (!dn)
return 0;
ret = of_get_nand_bus_width(chip);
if (ret)
return ret;
if (of_property_read_bool(dn, "nand-is-boot-medium"))
chip->options |= NAND_IS_BOOT_MEDIUM;
if (of_property_read_bool(dn, "nand-on-flash-bbt"))
chip->bbt_options |= NAND_BBT_USE_FLASH;
of_get_nand_ecc_user_config(nand);
of_get_nand_ecc_legacy_user_config(chip);
/*
* If neither the user nor the NAND controller has requested a specific
* ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST.
*/
nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
/*
* Use the user requested engine type if there is one; otherwise default
* to the NAND controller choice, and finally fall back to the raw NAND
* default.
*/
if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
chip->ecc.engine_type = nand->ecc.defaults.engine_type;
chip->ecc.placement = nand->ecc.user_conf.placement;
chip->ecc.algo = nand->ecc.user_conf.algo;
chip->ecc.strength = nand->ecc.user_conf.strength;
chip->ecc.size = nand->ecc.user_conf.step_size;
return 0;
}
/**
* nand_scan_ident - Scan for the NAND device
* @chip: NAND chip object
* @maxchips: number of chips to scan for
* @table: alternative NAND ID table
*
* This is the first phase of the normal nand_scan() function. It reads the
* flash ID and sets up MTD fields accordingly.
*
* This helper used to be called directly from controller drivers that needed
* to tweak some ECC-related parameters before nand_scan_tail(). This separation
* prevented dynamic allocations during this phase, which was inconvenient and
* has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
*/
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
struct nand_flash_dev *table)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
int nand_maf_id, nand_dev_id;
unsigned int i;
int ret;
memorg = nanddev_get_memorg(&chip->base);
/* Assume all dies are deselected when we enter nand_scan_ident(). */
chip->cur_cs = -1;
mutex_init(&chip->lock);
init_waitqueue_head(&chip->resume_wq);
/* Enforce the right timings for reset/detection */
chip->current_interface_config = nand_get_reset_interface_config();
ret = rawnand_dt_init(chip);
if (ret)
return ret;
if (!mtd->name && mtd->dev.parent)
mtd->name = dev_name(mtd->dev.parent);
/* Set the default functions */
nand_set_defaults(chip);
ret = nand_legacy_check_hooks(chip);
if (ret)
return ret;
memorg->ntargets = maxchips;
/* Read the flash type */
ret = nand_detect(chip, table);
if (ret) {
if (!(chip->options & NAND_SCAN_SILENT_NODEV))
pr_warn("No NAND device found\n");
nand_deselect_target(chip);
return ret;
}
nand_maf_id = chip->id.data[0];
nand_dev_id = chip->id.data[1];
nand_deselect_target(chip);
/* Check for a chip array */
for (i = 1; i < maxchips; i++) {
u8 id[2];
/* See comment in nand_get_flash_type for reset */
ret = nand_reset(chip, i);
if (ret)
break;
nand_select_target(chip, i);
/* Send the command for reading device ID */
ret = nand_readid_op(chip, 0, id, sizeof(id));
if (ret)
break;
/* Read manufacturer and device IDs */
if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
nand_deselect_target(chip);
break;
}
nand_deselect_target(chip);
}
if (i > 1)
pr_info("%d chips detected\n", i);
/* Store the number of chips and calc total size for mtd */
memorg->ntargets = i;
mtd->size = i * nanddev_target_size(&chip->base);
return 0;
}
static void nand_scan_ident_cleanup(struct nand_chip *chip)
{
kfree(chip->parameters.model);
kfree(chip->parameters.onfi);
}
int rawnand_sw_hamming_init(struct nand_chip *chip)
{
struct nand_ecc_sw_hamming_conf *engine_conf;
struct nand_device *base = &chip->base;
int ret;
base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING;
base->ecc.user_conf.strength = chip->ecc.strength;
base->ecc.user_conf.step_size = chip->ecc.size;
ret = nand_ecc_sw_hamming_init_ctx(base);
if (ret)
return ret;
engine_conf = base->ecc.ctx.priv;
if (chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER)
engine_conf->sm_order = true;
chip->ecc.size = base->ecc.ctx.conf.step_size;
chip->ecc.strength = base->ecc.ctx.conf.strength;
chip->ecc.total = base->ecc.ctx.total;
chip->ecc.steps = nanddev_get_ecc_nsteps(base);
chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);
return 0;
}
EXPORT_SYMBOL(rawnand_sw_hamming_init);
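/*
 * Worked example (illustrative numbers): software Hamming produces 3 ECC
 * bytes per step. On a 2048-byte page with the default 256-byte step,
 * the context ends up with 8 steps, ecc.total = 24 and
 * ecc.bytes = 24 / 8 = 3.
 */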
int rawnand_sw_hamming_calculate(struct nand_chip *chip,
const unsigned char *buf,
unsigned char *code)
{
struct nand_device *base = &chip->base;
return nand_ecc_sw_hamming_calculate(base, buf, code);
}
EXPORT_SYMBOL(rawnand_sw_hamming_calculate);
int rawnand_sw_hamming_correct(struct nand_chip *chip,
unsigned char *buf,
unsigned char *read_ecc,
unsigned char *calc_ecc)
{
struct nand_device *base = &chip->base;
return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc);
}
EXPORT_SYMBOL(rawnand_sw_hamming_correct);
void rawnand_sw_hamming_cleanup(struct nand_chip *chip)
{
struct nand_device *base = &chip->base;
nand_ecc_sw_hamming_cleanup_ctx(base);
}
EXPORT_SYMBOL(rawnand_sw_hamming_cleanup);
int rawnand_sw_bch_init(struct nand_chip *chip)
{
struct nand_device *base = &chip->base;
const struct nand_ecc_props *ecc_conf = nanddev_get_ecc_conf(base);
int ret;
base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH;
base->ecc.user_conf.step_size = chip->ecc.size;
base->ecc.user_conf.strength = chip->ecc.strength;
ret = nand_ecc_sw_bch_init_ctx(base);
if (ret)
return ret;
chip->ecc.size = ecc_conf->step_size;
chip->ecc.strength = ecc_conf->strength;
chip->ecc.total = base->ecc.ctx.total;
chip->ecc.steps = nanddev_get_ecc_nsteps(base);
chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);
return 0;
}
EXPORT_SYMBOL(rawnand_sw_bch_init);
static int rawnand_sw_bch_calculate(struct nand_chip *chip,
const unsigned char *buf,
unsigned char *code)
{
struct nand_device *base = &chip->base;
return nand_ecc_sw_bch_calculate(base, buf, code);
}
int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
unsigned char *read_ecc, unsigned char *calc_ecc)
{
struct nand_device *base = &chip->base;
return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc);
}
EXPORT_SYMBOL(rawnand_sw_bch_correct);
void rawnand_sw_bch_cleanup(struct nand_chip *chip)
{
struct nand_device *base = &chip->base;
nand_ecc_sw_bch_cleanup_ctx(base);
}
EXPORT_SYMBOL(rawnand_sw_bch_cleanup);
static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
{
struct nand_ecc_ctrl *ecc = &chip->ecc;
switch (ecc->placement) {
case NAND_ECC_PLACEMENT_UNKNOWN:
case NAND_ECC_PLACEMENT_OOB:
/* Use standard hwecc read page function? */
if (!ecc->read_page)
ecc->read_page = nand_read_page_hwecc;
if (!ecc->write_page)
ecc->write_page = nand_write_page_hwecc;
if (!ecc->read_page_raw)
ecc->read_page_raw = nand_read_page_raw;
if (!ecc->write_page_raw)
ecc->write_page_raw = nand_write_page_raw;
if (!ecc->read_oob)
ecc->read_oob = nand_read_oob_std;
if (!ecc->write_oob)
ecc->write_oob = nand_write_oob_std;
if (!ecc->read_subpage)
ecc->read_subpage = nand_read_subpage;
if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
ecc->write_subpage = nand_write_subpage_hwecc;
fallthrough;
case NAND_ECC_PLACEMENT_INTERLEAVED:
if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
(!ecc->read_page ||
ecc->read_page == nand_read_page_hwecc ||
!ecc->write_page ||
ecc->write_page == nand_write_page_hwecc)) {
WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
return -EINVAL;
}
/* Use standard syndrome read/write page function? */
if (!ecc->read_page)
ecc->read_page = nand_read_page_syndrome;
if (!ecc->write_page)
ecc->write_page = nand_write_page_syndrome;
if (!ecc->read_page_raw)
ecc->read_page_raw = nand_read_page_raw_syndrome;
if (!ecc->write_page_raw)
ecc->write_page_raw = nand_write_page_raw_syndrome;
if (!ecc->read_oob)
ecc->read_oob = nand_read_oob_syndrome;
if (!ecc->write_oob)
ecc->write_oob = nand_write_oob_syndrome;
break;
default:
pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
ecc->placement);
return -EINVAL;
}
return 0;
}
static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_device *nanddev = mtd_to_nanddev(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret;
if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
return -EINVAL;
switch (ecc->algo) {
case NAND_ECC_ALGO_HAMMING:
ecc->calculate = rawnand_sw_hamming_calculate;
ecc->correct = rawnand_sw_hamming_correct;
ecc->read_page = nand_read_page_swecc;
ecc->read_subpage = nand_read_subpage;
ecc->write_page = nand_write_page_swecc;
if (!ecc->read_page_raw)
ecc->read_page_raw = nand_read_page_raw;
if (!ecc->write_page_raw)
ecc->write_page_raw = nand_write_page_raw;
ecc->read_oob = nand_read_oob_std;
ecc->write_oob = nand_write_oob_std;
if (!ecc->size)
ecc->size = 256;
ecc->bytes = 3;
ecc->strength = 1;
if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
ret = rawnand_sw_hamming_init(chip);
if (ret) {
WARN(1, "Hamming ECC initialization failed!\n");
return ret;
}
return 0;
case NAND_ECC_ALGO_BCH:
if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
return -EINVAL;
}
ecc->calculate = rawnand_sw_bch_calculate;
ecc->correct = rawnand_sw_bch_correct;
ecc->read_page = nand_read_page_swecc;
ecc->read_subpage = nand_read_subpage;
ecc->write_page = nand_write_page_swecc;
if (!ecc->read_page_raw)
ecc->read_page_raw = nand_read_page_raw;
if (!ecc->write_page_raw)
ecc->write_page_raw = nand_write_page_raw;
ecc->read_oob = nand_read_oob_std;
ecc->write_oob = nand_write_oob_std;
/*
* We can only maximize ECC config when the default layout is
* used, otherwise we don't know how many bytes can really be
* used.
*/
if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH &&
mtd->ooblayout != nand_get_large_page_ooblayout())
nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH;
ret = rawnand_sw_bch_init(chip);
if (ret) {
WARN(1, "BCH ECC initialization failed!\n");
return ret;
}
return 0;
default:
WARN(1, "Unsupported ECC algorithm!\n");
return -EINVAL;
}
}
/**
* nand_check_ecc_caps - check the sanity of preset ECC settings
* @chip: nand chip info structure
* @caps: ECC caps info structure
* @oobavail: OOB size that the ECC engine can use
*
* When ECC step size and strength are already set, check if they are supported
* by the controller and the calculated ECC bytes fit within the chip's OOB.
* On success, the calculated number of ECC bytes is set.
*/
static int
nand_check_ecc_caps(struct nand_chip *chip,
const struct nand_ecc_caps *caps, int oobavail)
{
struct mtd_info *mtd = nand_to_mtd(chip);
const struct nand_ecc_step_info *stepinfo;
int preset_step = chip->ecc.size;
int preset_strength = chip->ecc.strength;
int ecc_bytes, nsteps = mtd->writesize / preset_step;
int i, j;
for (i = 0; i < caps->nstepinfos; i++) {
stepinfo = &caps->stepinfos[i];
if (stepinfo->stepsize != preset_step)
continue;
for (j = 0; j < stepinfo->nstrengths; j++) {
if (stepinfo->strengths[j] != preset_strength)
continue;
ecc_bytes = caps->calc_ecc_bytes(preset_step,
preset_strength);
if (WARN_ON_ONCE(ecc_bytes < 0))
return ecc_bytes;
if (ecc_bytes * nsteps > oobavail) {
pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
preset_step, preset_strength);
return -ENOSPC;
}
chip->ecc.bytes = ecc_bytes;
return 0;
}
}
pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
preset_step, preset_strength);
return -ENOTSUPP;
}
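/*
 * Worked example (illustrative numbers): with a preset (step, strength)
 * of (512, 8) on a 2048-byte page, nsteps = 4. If the controller's
 * calc_ecc_bytes(512, 8) returns 13, the check above requires
 * 13 * 4 = 52 bytes <= oobavail and, on success, sets ecc.bytes = 13.
 */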
/**
* nand_match_ecc_req - meet the chip's requirement with least ECC bytes
* @chip: nand chip info structure
* @caps: ECC engine caps info structure
* @oobavail: OOB size that the ECC engine can use
*
* If a chip's ECC requirement is provided, try to meet it with the least
* number of ECC bytes (i.e. with the largest number of OOB-free bytes).
* On success, the chosen ECC settings are set.
*/
static int
nand_match_ecc_req(struct nand_chip *chip,
const struct nand_ecc_caps *caps, int oobavail)
{
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(&chip->base);
struct mtd_info *mtd = nand_to_mtd(chip);
const struct nand_ecc_step_info *stepinfo;
int req_step = requirements->step_size;
int req_strength = requirements->strength;
int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
int best_step = 0, best_strength = 0, best_ecc_bytes = 0;
int best_ecc_bytes_total = INT_MAX;
int i, j;
/* No information provided by the NAND chip */
if (!req_step || !req_strength)
return -ENOTSUPP;
/* number of correctable bits the chip requires in a page */
req_corr = mtd->writesize / req_step * req_strength;
for (i = 0; i < caps->nstepinfos; i++) {
stepinfo = &caps->stepinfos[i];
step_size = stepinfo->stepsize;
for (j = 0; j < stepinfo->nstrengths; j++) {
strength = stepinfo->strengths[j];
/*
* If both step size and strength are smaller than the
* chip's requirement, it is not easy to compare the
* resulting reliability.
*/
if (step_size < req_step && strength < req_strength)
continue;
if (mtd->writesize % step_size)
continue;
nsteps = mtd->writesize / step_size;
ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
if (WARN_ON_ONCE(ecc_bytes < 0))
continue;
ecc_bytes_total = ecc_bytes * nsteps;
if (ecc_bytes_total > oobavail ||
strength * nsteps < req_corr)
continue;
/*
* We assume the best is to meet the chip's requirement
* with the least number of ECC bytes.
*/
if (ecc_bytes_total < best_ecc_bytes_total) {
best_ecc_bytes_total = ecc_bytes_total;
best_step = step_size;
best_strength = strength;
best_ecc_bytes = ecc_bytes;
}
}
}
if (best_ecc_bytes_total == INT_MAX)
return -ENOTSUPP;
chip->ecc.size = best_step;
chip->ecc.strength = best_strength;
chip->ecc.bytes = best_ecc_bytes;
return 0;
}
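/*
 * Worked example (illustrative numbers): a chip requiring 4 bits per
 * 512 bytes on a 2048-byte page needs req_corr = 2048 / 512 * 4 = 16
 * correctable bits. If the controller offers (512, 4) at 7 bytes/step
 * (28 bytes total) and (512, 8) at 13 bytes/step (52 bytes total), both
 * meet req_corr and fit the OOB, and (512, 4) wins because it uses
 * fewer total ECC bytes.
 */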
/**
* nand_maximize_ecc - choose the max ECC strength available
* @chip: nand chip info structure
* @caps: ECC engine caps info structure
* @oobavail: OOB size that the ECC engine can use
*
* Choose the max ECC strength that is supported on the controller, and can fit
* within the chip's OOB. On success, the chosen ECC settings are set.
*/
static int
nand_maximize_ecc(struct nand_chip *chip,
const struct nand_ecc_caps *caps, int oobavail)
{
struct mtd_info *mtd = nand_to_mtd(chip);
const struct nand_ecc_step_info *stepinfo;
int step_size, strength, nsteps, ecc_bytes, corr;
int best_corr = 0;
int best_step = 0;
int best_strength = 0, best_ecc_bytes = 0;
int i, j;
for (i = 0; i < caps->nstepinfos; i++) {
stepinfo = &caps->stepinfos[i];
step_size = stepinfo->stepsize;
/* If chip->ecc.size is already set, respect it */
if (chip->ecc.size && step_size != chip->ecc.size)
continue;
for (j = 0; j < stepinfo->nstrengths; j++) {
strength = stepinfo->strengths[j];
if (mtd->writesize % step_size)
continue;
nsteps = mtd->writesize / step_size;
ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
if (WARN_ON_ONCE(ecc_bytes < 0))
continue;
if (ecc_bytes * nsteps > oobavail)
continue;
corr = strength * nsteps;
/*
* If the number of correctable bits is the same,
* bigger step_size has more reliability.
*/
if (corr > best_corr ||
(corr == best_corr && step_size > best_step)) {
best_corr = corr;
best_step = step_size;
best_strength = strength;
best_ecc_bytes = ecc_bytes;
}
}
}
if (!best_corr)
return -ENOTSUPP;
chip->ecc.size = best_step;
chip->ecc.strength = best_strength;
chip->ecc.bytes = best_ecc_bytes;
return 0;
}
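/*
 * Worked example (illustrative numbers): with oobavail = 60 on a
 * 2048-byte page, candidates (512, 8) at 13 bytes/step (52 total,
 * corr = 4 * 8 = 32) and (1024, 16) at 26 bytes/step (52 total,
 * corr = 2 * 16 = 32) tie on correctability, so the 1024-byte step is
 * preferred per the tie-break above.
 */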
/**
* nand_ecc_choose_conf - Set the ECC strength and ECC step size
* @chip: nand chip info structure
* @caps: ECC engine caps info structure
* @oobavail: OOB size that the ECC engine can use
*
* Choose the ECC configuration according to the following logic.
*
* 1. If both ECC step size and ECC strength are already set (usually by DT)
* then check if they are supported by this controller.
* 2. If the user provided the nand-ecc-maximize property, then select the
* maximum ECC strength.
* 3. Otherwise, try to match the ECC step size and ECC strength closest
* to the chip's requirement. If the available OOB size can't fit the chip's
* requirement then fall back to the maximum ECC step size and ECC strength.
*
* On success, the chosen ECC settings are set.
*/
int nand_ecc_choose_conf(struct nand_chip *chip,
const struct nand_ecc_caps *caps, int oobavail)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_device *nanddev = mtd_to_nanddev(mtd);
if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
return -EINVAL;
if (chip->ecc.size && chip->ecc.strength)
return nand_check_ecc_caps(chip, caps, oobavail);
if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
return nand_maximize_ecc(chip, caps, oobavail);
if (!nand_match_ecc_req(chip, caps, oobavail))
return 0;
return nand_maximize_ecc(chip, caps, oobavail);
}
EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
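/*
 * A minimal usage sketch (hypothetical driver names): a controller
 * supporting a single 512-byte step at strengths 8 and 15 can describe
 * its capabilities with NAND_ECC_CAPS_SINGLE() and defer the choice to
 * the core from its ->attach_chip() hook:
 *
 * NAND_ECC_CAPS_SINGLE(foo_ecc_caps, foo_calc_ecc_bytes, 512, 8, 15);
 *
 * static int foo_attach_chip(struct nand_chip *chip)
 * {
 * return nand_ecc_choose_conf(chip, &foo_ecc_caps,
 * nand_to_mtd(chip)->oobsize - 2);
 * }
 *
 * Here two OOB bytes are assumed reserved for the bad block marker.
 */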
static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
struct nand_chip *chip = container_of(nand, struct nand_chip,
base);
unsigned int eb = nanddev_pos_to_row(nand, pos);
int ret;
eb >>= nand->rowconv.eraseblock_addr_shift;
nand_select_target(chip, pos->target);
ret = nand_erase_op(chip, eb);
nand_deselect_target(chip);
return ret;
}
static int rawnand_markbad(struct nand_device *nand,
const struct nand_pos *pos)
{
struct nand_chip *chip = container_of(nand, struct nand_chip,
base);
return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
}
static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
struct nand_chip *chip = container_of(nand, struct nand_chip,
base);
int ret;
nand_select_target(chip, pos->target);
ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
nand_deselect_target(chip);
return ret;
}
static const struct nand_ops rawnand_ops = {
.erase = rawnand_erase,
.markbad = rawnand_markbad,
.isbad = rawnand_isbad,
};
/**
* nand_scan_tail - Scan for the NAND device
* @chip: NAND chip object
*
* This is the second phase of the normal nand_scan() function. It fills out
* all the uninitialized function pointers with the defaults and scans for a
* bad block table if appropriate.
*/
static int nand_scan_tail(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i;
/* New bad blocks should be marked in OOB, flash-based BBT, or both */
if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
!(chip->bbt_options & NAND_BBT_USE_FLASH))) {
return -EINVAL;
}
chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
if (!chip->data_buf)
return -ENOMEM;
/*
* FIXME: some NAND manufacturer drivers expect the first die to be
* selected when manufacturer->init() is called. They should be fixed
* to explicitly select the relevant die when interacting with the NAND
* chip.
*/
nand_select_target(chip, 0);
ret = nand_manufacturer_init(chip);
nand_deselect_target(chip);
if (ret)
goto err_free_buf;
/* Set the internal oob buffer location, just after the page data */
chip->oob_poi = chip->data_buf + mtd->writesize;
/*
* If no default placement scheme is given, select an appropriate one.
*/
if (!mtd->ooblayout &&
!(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
ecc->algo == NAND_ECC_ALGO_BCH) &&
!(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
ecc->algo == NAND_ECC_ALGO_HAMMING)) {
switch (mtd->oobsize) {
case 8:
case 16:
mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
break;
case 64:
case 128:
mtd_set_ooblayout(mtd,
nand_get_large_page_hamming_ooblayout());
break;
default:
/*
* Expose the whole OOB area to users if ECC_NONE
* is passed. We could do that for all kinds of
* ->oobsize, but we must keep the old large/small
* page with ECC layout when ->oobsize <= 128 for
* compatibility reasons.
*/
if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
mtd_set_ooblayout(mtd,
nand_get_large_page_ooblayout());
break;
}
WARN(1, "No oob scheme defined for oobsize %d\n",
mtd->oobsize);
ret = -EINVAL;
goto err_nand_manuf_cleanup;
}
}
/*
 * Check the ECC mode. If 3-byte/512-byte hardware ECC was selected but
 * the page size is only 256 bytes (smaller than the ECC step), fall
 * back to software ECC.
 */
switch (ecc->engine_type) {
case NAND_ECC_ENGINE_TYPE_ON_HOST:
ret = nand_set_ecc_on_host_ops(chip);
if (ret)
goto err_nand_manuf_cleanup;
if (mtd->writesize >= ecc->size) {
if (!ecc->strength) {
WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
ret = -EINVAL;
goto err_nand_manuf_cleanup;
}
break;
}
pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
ecc->size, mtd->writesize);
ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
ecc->algo = NAND_ECC_ALGO_HAMMING;
fallthrough;
case NAND_ECC_ENGINE_TYPE_SOFT:
ret = nand_set_ecc_soft_ops(chip);
if (ret)
goto err_nand_manuf_cleanup;
break;
case NAND_ECC_ENGINE_TYPE_ON_DIE:
if (!ecc->read_page || !ecc->write_page) {
WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
ret = -EINVAL;
goto err_nand_manuf_cleanup;
}
if (!ecc->read_oob)
ecc->read_oob = nand_read_oob_std;
if (!ecc->write_oob)
ecc->write_oob = nand_write_oob_std;
break;
case NAND_ECC_ENGINE_TYPE_NONE:
pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
ecc->read_page = nand_read_page_raw;
ecc->write_page = nand_write_page_raw;
ecc->read_oob = nand_read_oob_std;
ecc->read_page_raw = nand_read_page_raw;
ecc->write_page_raw = nand_write_page_raw;
ecc->write_oob = nand_write_oob_std;
ecc->size = mtd->writesize;
ecc->bytes = 0;
ecc->strength = 0;
break;
default:
WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type);
ret = -EINVAL;
goto err_nand_manuf_cleanup;
}
if (ecc->correct || ecc->calculate) {
ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
if (!ecc->calc_buf || !ecc->code_buf) {
ret = -ENOMEM;
goto err_nand_manuf_cleanup;
}
}
/* For many systems, the standard OOB write also works for raw */
if (!ecc->read_oob_raw)
ecc->read_oob_raw = ecc->read_oob;
if (!ecc->write_oob_raw)
ecc->write_oob_raw = ecc->write_oob;
/* propagate ecc info to mtd_info */
mtd->ecc_strength = ecc->strength;
mtd->ecc_step_size = ecc->size;
/*
* Set the number of read / write steps for one page depending on ECC
* mode.
*/
if (!ecc->steps)
ecc->steps = mtd->writesize / ecc->size;
if (ecc->steps * ecc->size != mtd->writesize) {
WARN(1, "Invalid ECC parameters\n");
ret = -EINVAL;
goto err_nand_manuf_cleanup;
}
if (!ecc->total) {
ecc->total = ecc->steps * ecc->bytes;
chip->base.ecc.ctx.total = ecc->total;
}
if (ecc->total > mtd->oobsize) {
WARN(1, "Total number of ECC bytes exceeded oobsize\n");
ret = -EINVAL;
goto err_nand_manuf_cleanup;
}
/*
* The number of bytes available for a client to place data into
* the out of band area.
*/
ret = mtd_ooblayout_count_freebytes(mtd);
if (ret < 0)
ret = 0;
mtd->oobavail = ret;
/* ECC sanity check: warn if it's too weak */
if (!nand_ecc_is_strong_enough(&chip->base))
pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
mtd->name, chip->ecc.strength, chip->ecc.size,
nanddev_get_ecc_requirements(&chip->base)->strength,
nanddev_get_ecc_requirements(&chip->base)->step_size);
/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
switch (ecc->steps) {
case 2:
mtd->subpage_sft = 1;
break;
case 4:
case 8:
case 16:
mtd->subpage_sft = 2;
break;
}
}
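/*
 * E.g. (illustrative) a 2048-byte SLC page with 4 ECC steps gets
 * subpage_sft = 2, i.e. 2048 >> 2 = 512-byte subpages.
 */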
chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
/* Invalidate the pagebuffer reference */
chip->pagecache.page = -1;
/* Large page NAND with SOFT_ECC should support subpage reads */
switch (ecc->engine_type) {
case NAND_ECC_ENGINE_TYPE_SOFT:
if (chip->page_shift > 9)
chip->options |= NAND_SUBPAGE_READ;
break;
default:
break;
}
ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
if (ret)
goto err_nand_manuf_cleanup;
/* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
if (chip->options & NAND_ROM)
mtd->flags = MTD_CAP_ROM;
/* Fill in remaining MTD driver data */
mtd->_erase = nand_erase;
mtd->_point = NULL;
mtd->_unpoint = NULL;
mtd->_panic_write = panic_nand_write;
mtd->_read_oob = nand_read_oob;
mtd->_write_oob = nand_write_oob;
mtd->_sync = nand_sync;
mtd->_lock = nand_lock;
mtd->_unlock = nand_unlock;
mtd->_suspend = nand_suspend;
mtd->_resume = nand_resume;
mtd->_reboot = nand_shutdown;
mtd->_block_isreserved = nand_block_isreserved;
mtd->_block_isbad = nand_block_isbad;
mtd->_block_markbad = nand_block_markbad;
mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
/*
* Initialize bitflip_threshold to its default prior to the scan_bbt() call.
* scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
* properly set.
*/
if (!mtd->bitflip_threshold)
mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
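/*
 * E.g. (illustrative) ecc_strength = 8 yields a threshold of
 * DIV_ROUND_UP(24, 4) = 6, so reads reporting 6 or more bitflips
 * in a region are flagged -EUCLEAN by the MTD layer.
 */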
/* Find the fastest data interface for this chip */
ret = nand_choose_interface_config(chip);
if (ret)
goto err_nanddev_cleanup;
/* Enter fastest possible mode on all dies. */
for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
ret = nand_setup_interface(chip, i);
if (ret)
goto err_free_interface_config;
}
rawnand_late_check_supported_ops(chip);
/*
* Look for secure regions in the NAND chip. These regions are supposed
* to be protected by a secure element like TrustZone, so read/write
* accesses to them will be blocked at runtime by this driver.
*/
ret = of_get_nand_secure_regions(chip);
if (ret)
goto err_free_interface_config;
/* Check if we should skip the bad block table scan. */
if (chip->options & NAND_SKIP_BBTSCAN)
return 0;
/* Build bad block table */
ret = nand_create_bbt(chip);
if (ret)
goto err_free_secure_regions;
return 0;
err_free_secure_regions:
kfree(chip->secure_regions);
err_free_interface_config:
kfree(chip->best_interface_config);
err_nanddev_cleanup:
nanddev_cleanup(&chip->base);
err_nand_manuf_cleanup:
nand_manufacturer_cleanup(chip);
err_free_buf:
kfree(chip->data_buf);
kfree(ecc->code_buf);
kfree(ecc->calc_buf);
return ret;
}
static int nand_attach(struct nand_chip *chip)
{
if (chip->controller->ops && chip->controller->ops->attach_chip)
return chip->controller->ops->attach_chip(chip);
return 0;
}
static void nand_detach(struct nand_chip *chip)
{
if (chip->controller->ops && chip->controller->ops->detach_chip)
chip->controller->ops->detach_chip(chip);
}
/**
* nand_scan_with_ids - [NAND Interface] Scan for the NAND device
* @chip: NAND chip object
* @maxchips: number of chips to scan for.
* @ids: optional flash IDs table
*
* This fills out all the uninitialized function pointers with the defaults.
* The flash ID is read and the mtd/chip structures are filled with the
* appropriate values.
*/
int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
struct nand_flash_dev *ids)
{
int ret;
if (!maxchips)
return -EINVAL;
ret = nand_scan_ident(chip, maxchips, ids);
if (ret)
return ret;
ret = nand_attach(chip);
if (ret)
goto cleanup_ident;
ret = nand_scan_tail(chip);
if (ret)
goto detach_chip;
return 0;
detach_chip:
nand_detach(chip);
cleanup_ident:
nand_scan_ident_cleanup(chip);
return ret;
}
EXPORT_SYMBOL(nand_scan_with_ids);
/**
* nand_cleanup - [NAND Interface] Free resources held by the NAND device
* @chip: NAND chip object
*/
void nand_cleanup(struct nand_chip *chip)
{
if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) {
if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
rawnand_sw_hamming_cleanup(chip);
else if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
rawnand_sw_bch_cleanup(chip);
}
nanddev_cleanup(&chip->base);
/* Free secure regions data */
kfree(chip->secure_regions);
/* Free bad block table memory */
kfree(chip->bbt);
kfree(chip->data_buf);
kfree(chip->ecc.code_buf);
kfree(chip->ecc.calc_buf);
/* Free bad block descriptor memory */
if (chip->badblock_pattern && chip->badblock_pattern->options
& NAND_BBT_DYNAMICSTRUCT)
kfree(chip->badblock_pattern);
/* Free the data interface */
kfree(chip->best_interface_config);
/* Free manufacturer priv data. */
nand_manufacturer_cleanup(chip);
/* Free controller specific allocations after chip identification */
nand_detach(chip);
/* Free identification phase allocations */
nand_scan_ident_cleanup(chip);
}
EXPORT_SYMBOL_GPL(nand_cleanup);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steven J. Hill <[email protected]>");
MODULE_AUTHOR("Thomas Gleixner <[email protected]>");
MODULE_DESCRIPTION("Generic NAND flash driver code");
| linux-master | drivers/mtd/nand/raw/nand_base.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Rockchip NAND Flash controller driver.
* Copyright (C) 2020 Rockchip Inc.
* Author: Yifeng Zhao <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
/*
 * NFC Page Data Layout:
 * 1024 bytes data + 4 bytes sys data + 28 to 124 bytes ECC data +
 * 1024 bytes data + 4 bytes sys data + 28 to 124 bytes ECC data +
 * ......
 * NAND Page Data Layout:
 * 1024 * n data + m bytes OOB
 * Original Bad Block Mask Location:
 * First byte of OOB (spare area).
 * nand_chip->oob_poi data layout:
 * 4 bytes sys data + .... + 4 bytes sys data + ECC data.
 */
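/*
 * Worked example (illustrative numbers): at 16-bit strength the
 * per-step ECC cost is DIV_ROUND_UP(16 * fls(8 * 1024), 8) = 28 bytes,
 * so a 2048-byte page is transferred as two chunks of
 * 1024 data + 4 sys + 28 ECC bytes.
 */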
/* NAND controller register definition */
#define NFC_READ (0)
#define NFC_WRITE (1)
#define NFC_FMCTL (0x00)
#define FMCTL_CE_SEL_M 0xFF
#define FMCTL_CE_SEL(x) (1 << (x))
#define FMCTL_WP BIT(8)
#define FMCTL_RDY BIT(9)
#define NFC_FMWAIT (0x04)
#define FLCTL_RST BIT(0)
#define FLCTL_WR (1) /* 0: read, 1: write */
#define FLCTL_XFER_ST BIT(2)
#define FLCTL_XFER_EN BIT(3)
#define FLCTL_ACORRECT BIT(10) /* Auto correct error bits. */
#define FLCTL_XFER_READY BIT(20)
#define FLCTL_XFER_SECTOR (22)
#define FLCTL_TOG_FIX BIT(29)
#define BCHCTL_BANK_M (7 << 5)
#define BCHCTL_BANK (5)
#define DMA_ST BIT(0)
#define DMA_WR (1) /* 0: write, 1: read */
#define DMA_EN BIT(2)
#define DMA_AHB_SIZE (3) /* 0: 1, 1: 2, 2: 4 */
#define DMA_BURST_SIZE (6) /* 0: 1, 3: 4, 5: 8, 7: 16 */
#define DMA_INC_NUM (9) /* 1 - 16 */
#define ECC_ERR_CNT(x, e) ((((x) >> (e).low) & (e).low_mask) |\
(((x) >> (e).high) & (e).high_mask) << (e).low_bn)
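/*
 * For illustration, with the v6/v8 field layout used below (low = 3,
 * low_mask = 0x1F, low_bn = 5, high = 27, high_mask = 0x1) this expands
 * to ((x >> 3) & 0x1F) | (((x >> 27) & 0x1) << 5), i.e. a 6-bit
 * corrected-bit count assembled from two split register fields.
 */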
#define INT_DMA BIT(0)
#define NFC_BANK (0x800)
#define NFC_BANK_STEP (0x100)
#define BANK_DATA (0x00)
#define BANK_ADDR (0x04)
#define BANK_CMD (0x08)
#define NFC_SRAM0 (0x1000)
#define NFC_SRAM1 (0x1400)
#define NFC_SRAM_SIZE (0x400)
#define NFC_TIMEOUT (500000)
#define NFC_MAX_OOB_PER_STEP 128
#define NFC_MIN_OOB_PER_STEP 64
#define MAX_DATA_SIZE 0xFFFC
#define MAX_ADDRESS_CYC 6
#define NFC_ECC_MAX_MODES 4
#define NFC_MAX_NSELS (8) /* Some Socs only have 1 or 2 CSs. */
#define NFC_SYS_DATA_SIZE (4) /* 4 bytes sys data in OOB per 1024 bytes of data. */
#define RK_DEFAULT_CLOCK_RATE (150 * 1000 * 1000) /* 150 MHz */
#define ACCTIMING(csrw, rwpw, rwcs) ((csrw) << 12 | (rwpw) << 5 | (rwcs))
enum nfc_type {
NFC_V6,
NFC_V8,
NFC_V9,
};
/**
 * struct ecc_cnt_status - represents the ECC error count field layout.
 * @err_flag_bit: error flag bit index in the register.
 * @low: ECC count low bits index in the register.
 * @low_mask: low bits mask.
 * @low_bn: number of ECC count low bits.
 * @high: ECC count high bits index in the register.
 * @high_mask: high bits mask.
 */
struct ecc_cnt_status {
u8 err_flag_bit;
u8 low;
u8 low_mask;
u8 low_bn;
u8 high;
u8 high_mask;
};
/**
 * struct nfc_cfg - NAND flash controller configuration.
 * @type: NFC version
* @ecc_strengths: ECC strengths
* @ecc_cfgs: ECC config values
* @flctl_off: FLCTL register offset
* @bchctl_off: BCHCTL register offset
* @dma_data_buf_off: DMA_DATA_BUF register offset
* @dma_oob_buf_off: DMA_OOB_BUF register offset
* @dma_cfg_off: DMA_CFG register offset
* @dma_st_off: DMA_ST register offset
* @bch_st_off: BCH_ST register offset
* @randmz_off: RANDMZ register offset
* @int_en_off: interrupt enable register offset
* @int_clr_off: interrupt clean register offset
* @int_st_off: interrupt status register offset
* @oob0_off: oob0 register offset
* @oob1_off: oob1 register offset
* @ecc0: represent ECC0 status data
* @ecc1: represent ECC1 status data
*/
struct nfc_cfg {
enum nfc_type type;
u8 ecc_strengths[NFC_ECC_MAX_MODES];
u32 ecc_cfgs[NFC_ECC_MAX_MODES];
u32 flctl_off;
u32 bchctl_off;
u32 dma_cfg_off;
u32 dma_data_buf_off;
u32 dma_oob_buf_off;
u32 dma_st_off;
u32 bch_st_off;
u32 randmz_off;
u32 int_en_off;
u32 int_clr_off;
u32 int_st_off;
u32 oob0_off;
u32 oob1_off;
struct ecc_cnt_status ecc0;
struct ecc_cnt_status ecc1;
};
struct rk_nfc_nand_chip {
struct list_head node;
struct nand_chip chip;
u16 boot_blks;
u16 metadata_size;
u32 boot_ecc;
u32 timing;
u8 nsels;
u8 sels[];
/* Nothing after this field. */
};
struct rk_nfc {
struct nand_controller controller;
const struct nfc_cfg *cfg;
struct device *dev;
struct clk *nfc_clk;
struct clk *ahb_clk;
void __iomem *regs;
u32 selected_bank;
u32 band_offset;
u32 cur_ecc;
u32 cur_timing;
struct completion done;
struct list_head chips;
u8 *page_buf;
u32 *oob_buf;
u32 page_buf_size;
u32 oob_buf_size;
unsigned long assigned_cs;
};
static inline struct rk_nfc_nand_chip *rk_nfc_to_rknand(struct nand_chip *chip)
{
return container_of(chip, struct rk_nfc_nand_chip, chip);
}
static inline u8 *rk_nfc_buf_to_data_ptr(struct nand_chip *chip, const u8 *p, int i)
{
return (u8 *)p + i * chip->ecc.size;
}
static inline u8 *rk_nfc_buf_to_oob_ptr(struct nand_chip *chip, int i)
{
u8 *poi;
poi = chip->oob_poi + i * NFC_SYS_DATA_SIZE;
return poi;
}
static inline u8 *rk_nfc_buf_to_oob_ecc_ptr(struct nand_chip *chip, int i)
{
struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
u8 *poi;
poi = chip->oob_poi + rknand->metadata_size + chip->ecc.bytes * i;
return poi;
}
static inline int rk_nfc_data_len(struct nand_chip *chip)
{
return chip->ecc.size + chip->ecc.bytes + NFC_SYS_DATA_SIZE;
}
static inline u8 *rk_nfc_data_ptr(struct nand_chip *chip, int i)
{
struct rk_nfc *nfc = nand_get_controller_data(chip);
return nfc->page_buf + i * rk_nfc_data_len(chip);
}
static inline u8 *rk_nfc_oob_ptr(struct nand_chip *chip, int i)
{
struct rk_nfc *nfc = nand_get_controller_data(chip);
return nfc->page_buf + i * rk_nfc_data_len(chip) + chip->ecc.size;
}
static int rk_nfc_hw_ecc_setup(struct nand_chip *chip, u32 strength)
{
struct rk_nfc *nfc = nand_get_controller_data(chip);
u32 reg, i;
for (i = 0; i < NFC_ECC_MAX_MODES; i++) {
if (strength == nfc->cfg->ecc_strengths[i]) {
reg = nfc->cfg->ecc_cfgs[i];
break;
}
}
if (i >= NFC_ECC_MAX_MODES)
return -EINVAL;
writel(reg, nfc->regs + nfc->cfg->bchctl_off);
/* Save chip ECC setting */
nfc->cur_ecc = strength;
return 0;
}
static void rk_nfc_select_chip(struct nand_chip *chip, int cs)
{
struct rk_nfc *nfc = nand_get_controller_data(chip);
struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
u32 val;
if (cs < 0) {
nfc->selected_bank = -1;
/* Deselect the currently selected target. */
val = readl_relaxed(nfc->regs + NFC_FMCTL);
val &= ~FMCTL_CE_SEL_M;
writel(val, nfc->regs + NFC_FMCTL);
return;
}
nfc->selected_bank = rknand->sels[cs];
nfc->band_offset = NFC_BANK + nfc->selected_bank * NFC_BANK_STEP;
val = readl_relaxed(nfc->regs + NFC_FMCTL);
val &= ~FMCTL_CE_SEL_M;
val |= FMCTL_CE_SEL(nfc->selected_bank);
writel(val, nfc->regs + NFC_FMCTL);
/*
* Compare current chip timing with selected chip timing and
* change if needed.
*/
if (nfc->cur_timing != rknand->timing) {
writel(rknand->timing, nfc->regs + NFC_FMWAIT);
nfc->cur_timing = rknand->timing;
}
/*
* Compare current chip ECC setting with selected chip ECC setting and
* change if needed.
*/
if (nfc->cur_ecc != ecc->strength)
rk_nfc_hw_ecc_setup(chip, ecc->strength);
}
static inline int rk_nfc_wait_ioready(struct rk_nfc *nfc)
{
int rc;
u32 val;
rc = readl_relaxed_poll_timeout(nfc->regs + NFC_FMCTL, val,
val & FMCTL_RDY, 10, NFC_TIMEOUT);
return rc;
}
static void rk_nfc_read_buf(struct rk_nfc *nfc, u8 *buf, int len)
{
int i;
for (i = 0; i < len; i++)
buf[i] = readb_relaxed(nfc->regs + nfc->band_offset +
BANK_DATA);
}
static void rk_nfc_write_buf(struct rk_nfc *nfc, const u8 *buf, int len)
{
int i;
for (i = 0; i < len; i++)
writeb(buf[i], nfc->regs + nfc->band_offset + BANK_DATA);
}
static int rk_nfc_cmd(struct nand_chip *chip,
const struct nand_subop *subop)
{
struct rk_nfc *nfc = nand_get_controller_data(chip);
unsigned int i, j, remaining, start;
int reg_offset = nfc->band_offset;
u8 *inbuf = NULL;
const u8 *outbuf;
u32 cnt = 0;
int ret = 0;
for (i = 0; i < subop->ninstrs; i++) {
const struct nand_op_instr *instr = &subop->instrs[i];
switch (instr->type) {
case NAND_OP_CMD_INSTR:
writeb(instr->ctx.cmd.opcode,
nfc->regs + reg_offset + BANK_CMD);
break;
case NAND_OP_ADDR_INSTR:
remaining = nand_subop_get_num_addr_cyc(subop, i);
start = nand_subop_get_addr_start_off(subop, i);
for (j = 0; j < 8 && j + start < remaining; j++)
writeb(instr->ctx.addr.addrs[j + start],
nfc->regs + reg_offset + BANK_ADDR);
break;
case NAND_OP_DATA_IN_INSTR:
case NAND_OP_DATA_OUT_INSTR:
start = nand_subop_get_data_start_off(subop, i);
cnt = nand_subop_get_data_len(subop, i);
if (instr->type == NAND_OP_DATA_OUT_INSTR) {
outbuf = instr->ctx.data.buf.out + start;
rk_nfc_write_buf(nfc, outbuf, cnt);
} else {
inbuf = instr->ctx.data.buf.in + start;
rk_nfc_read_buf(nfc, inbuf, cnt);
}
break;
case NAND_OP_WAITRDY_INSTR:
if (rk_nfc_wait_ioready(nfc) < 0) {
ret = -ETIMEDOUT;
dev_err(nfc->dev, "IO not ready\n");
}
break;
}
}
return ret;
}
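/*
 * The parser below routes two shapes of operations to rk_nfc_cmd():
 * read-style sequences (CMD [ADDR] [CMD] [WAITRDY] [DATA_IN]) and
 * write-style sequences (CMD [ADDR] [DATA_OUT] [CMD] [WAITRDY]);
 * every element is optional.
 */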
static const struct nand_op_parser rk_nfc_op_parser = NAND_OP_PARSER(
NAND_OP_PARSER_PATTERN(
rk_nfc_cmd,
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC),
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, MAX_DATA_SIZE)),
NAND_OP_PARSER_PATTERN(
rk_nfc_cmd,
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC),
NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, MAX_DATA_SIZE),
NAND_OP_PARSER_PAT_CMD_ELEM(true),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
);
static int rk_nfc_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
if (!check_only)
rk_nfc_select_chip(chip, op->cs);
return nand_op_parser_exec_op(chip, &rk_nfc_op_parser, op,
check_only);
}
static int rk_nfc_setup_interface(struct nand_chip *chip, int target,
const struct nand_interface_config *conf)
{
struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
struct rk_nfc *nfc = nand_get_controller_data(chip);
const struct nand_sdr_timings *timings;
u32 rate, tc2rw, trwpw, trw2c;
u32 temp;
if (target < 0)
return 0;
timings = nand_get_sdr_timings(conf);
if (IS_ERR(timings))
return -EOPNOTSUPP;
if (IS_ERR(nfc->nfc_clk))
rate = clk_get_rate(nfc->ahb_clk);
else
rate = clk_get_rate(nfc->nfc_clk);
/* Turn clock rate into kHz. */
rate /= 1000;
tc2rw = 1;
trw2c = 1;
trwpw = max(timings->tWC_min, timings->tRC_min) / 1000;
trwpw = DIV_ROUND_UP(trwpw * rate, 1000000);
temp = timings->tREA_max / 1000;
temp = DIV_ROUND_UP(temp * rate, 1000000);
if (trwpw < temp)
trwpw = temp;
/*
* ACCON: access timing control register
* -------------------------------------
* 31:18: reserved
* 17:12: csrw, clock cycles from the falling edge of CSn to the
* falling edge of RDn or WRn
* 11:11: reserved
* 10:05: rwpw, the width of RDn or WRn in processor clock cycles
* 04:00: rwcs, clock cycles from the rising edge of RDn or WRn to the
* rising edge of CSn
*/
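/*
 * Worked example (illustrative numbers): at rate = 150000 kHz with
 * max(tWC_min, tRC_min) = 25000 ps, trwpw = DIV_ROUND_UP(25 * 150000,
 * 1000000) = 4 cycles; with tc2rw = trw2c = 1 this packs into
 * ACCTIMING(1, 4, 1) = (1 << 12) | (4 << 5) | 1 = 0x1081, the same
 * value rk_nfc_hw_init() programs as the default.
 */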
/* Save chip timing */
rknand->timing = ACCTIMING(tc2rw, trwpw, trw2c);
return 0;
}
static void rk_nfc_xfer_start(struct rk_nfc *nfc, u8 rw, u8 n_KB,
dma_addr_t dma_data, dma_addr_t dma_oob)
{
u32 dma_reg, fl_reg, bch_reg;
dma_reg = DMA_ST | ((!rw) << DMA_WR) | DMA_EN | (2 << DMA_AHB_SIZE) |
(7 << DMA_BURST_SIZE) | (16 << DMA_INC_NUM);
fl_reg = (rw << FLCTL_WR) | FLCTL_XFER_EN | FLCTL_ACORRECT |
(n_KB << FLCTL_XFER_SECTOR) | FLCTL_TOG_FIX;
if (nfc->cfg->type == NFC_V6 || nfc->cfg->type == NFC_V8) {
bch_reg = readl_relaxed(nfc->regs + nfc->cfg->bchctl_off);
bch_reg = (bch_reg & (~BCHCTL_BANK_M)) |
(nfc->selected_bank << BCHCTL_BANK);
writel(bch_reg, nfc->regs + nfc->cfg->bchctl_off);
}
writel(dma_reg, nfc->regs + nfc->cfg->dma_cfg_off);
writel((u32)dma_data, nfc->regs + nfc->cfg->dma_data_buf_off);
writel((u32)dma_oob, nfc->regs + nfc->cfg->dma_oob_buf_off);
writel(fl_reg, nfc->regs + nfc->cfg->flctl_off);
fl_reg |= FLCTL_XFER_ST;
writel(fl_reg, nfc->regs + nfc->cfg->flctl_off);
}
static int rk_nfc_wait_for_xfer_done(struct rk_nfc *nfc)
{
void __iomem *ptr;
u32 reg;
ptr = nfc->regs + nfc->cfg->flctl_off;
return readl_relaxed_poll_timeout(ptr, reg,
reg & FLCTL_XFER_READY,
10, NFC_TIMEOUT);
}
static int rk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
int oob_on, int page)
{
struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
struct rk_nfc *nfc = nand_get_controller_data(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int i, pages_per_blk;
pages_per_blk = mtd->erasesize / mtd->writesize;
if ((chip->options & NAND_IS_BOOT_MEDIUM) &&
(page < (pages_per_blk * rknand->boot_blks)) &&
rknand->boot_ecc != ecc->strength) {
/*
* There's currently no method to notify the MTD framework that
* a different ECC strength is in use for the boot blocks.
*/
return -EIO;
}
if (!buf)
memset(nfc->page_buf, 0xff, mtd->writesize + mtd->oobsize);
for (i = 0; i < ecc->steps; i++) {
/* Copy data to the NFC buffer. */
if (buf)
memcpy(rk_nfc_data_ptr(chip, i),
rk_nfc_buf_to_data_ptr(chip, buf, i),
ecc->size);
/*
* The first four bytes of OOB are reserved for the
* boot ROM. In some debugging cases, such as with a
* read, erase and write back test, these 4 bytes stored
* in OOB also need to be written back.
*
* The function nand_block_bad detects bad blocks like:
*
* bad = chip->oob_poi[chip->badblockpos];
*
* chip->badblockpos == 0 for a large page NAND Flash,
* so chip->oob_poi[0] is the bad block mask (BBM).
*
* The OOB data layout on the NFC is:
*
* PA0 PA1 PA2 PA3 | BBM OOB1 OOB2 OOB3 | ...
*
* or
*
* 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ...
*
* The code here just swaps the first 4 bytes with the last
* 4 bytes without losing any data.
*
* The chip->oob_poi data layout:
*
* BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3
*
* The rk_nfc_ooblayout_free() function already has reserved
* these 4 bytes together with 2 bytes for BBM
* by reducing its length:
*
* oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2;
*/
if (!i)
memcpy(rk_nfc_oob_ptr(chip, i),
rk_nfc_buf_to_oob_ptr(chip, ecc->steps - 1),
NFC_SYS_DATA_SIZE);
else
memcpy(rk_nfc_oob_ptr(chip, i),
rk_nfc_buf_to_oob_ptr(chip, i - 1),
NFC_SYS_DATA_SIZE);
/* Copy ECC data to the NFC buffer. */
memcpy(rk_nfc_oob_ptr(chip, i) + NFC_SYS_DATA_SIZE,
rk_nfc_buf_to_oob_ecc_ptr(chip, i),
ecc->bytes);
}
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
rk_nfc_write_buf(nfc, buf, mtd->writesize + mtd->oobsize);
return nand_prog_page_end_op(chip);
}
static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
int oob_on, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct rk_nfc *nfc = nand_get_controller_data(chip);
struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int oob_step = (ecc->bytes > 60) ? NFC_MAX_OOB_PER_STEP :
NFC_MIN_OOB_PER_STEP;
int pages_per_blk = mtd->erasesize / mtd->writesize;
int ret = 0, i, boot_rom_mode = 0;
dma_addr_t dma_data, dma_oob;
u32 tmp;
u8 *oob;
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
if (buf)
memcpy(nfc->page_buf, buf, mtd->writesize);
else
memset(nfc->page_buf, 0xFF, mtd->writesize);
/*
* The first blocks (4, 8 or 16 depending on the device) are used
* by the boot ROM and the first 32 bits of OOB need to link to
* the next page address in the same block. We can't directly copy
* OOB data from the MTD framework, because this page address
* conflicts for example with the bad block marker (BBM),
* so we shift all OOB data including the BBM by 4 byte positions.
* As a consequence the OOB size available to the MTD framework is
* also reduced by 4 bytes.
*
* PA0 PA1 PA2 PA3 | BBM OOB1 OOB2 OOB3 | ...
*
* If a NAND is not a boot medium or the page is not a boot block,
* the first 4 bytes carry no page address and are simply written as 0xFF.
*
* 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ...
*
* The code here just swaps the first 4 bytes with the last
* 4 bytes without losing any data.
*
* The chip->oob_poi data layout:
*
* BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3
*
* Configure the ECC algorithm supported by the boot ROM.
*/
if ((page < (pages_per_blk * rknand->boot_blks)) &&
(chip->options & NAND_IS_BOOT_MEDIUM)) {
boot_rom_mode = 1;
if (rknand->boot_ecc != ecc->strength)
rk_nfc_hw_ecc_setup(chip, rknand->boot_ecc);
}
for (i = 0; i < ecc->steps; i++) {
if (!i)
oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE;
else
oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
tmp = oob[0] | oob[1] << 8 | oob[2] << 16 | oob[3] << 24;
if (nfc->cfg->type == NFC_V9)
nfc->oob_buf[i] = tmp;
else
nfc->oob_buf[i * (oob_step / 4)] = tmp;
}
dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf,
mtd->writesize, DMA_TO_DEVICE);
dma_oob = dma_map_single(nfc->dev, nfc->oob_buf,
ecc->steps * oob_step,
DMA_TO_DEVICE);
reinit_completion(&nfc->done);
writel(INT_DMA, nfc->regs + nfc->cfg->int_en_off);
rk_nfc_xfer_start(nfc, NFC_WRITE, ecc->steps, dma_data,
dma_oob);
ret = wait_for_completion_timeout(&nfc->done,
msecs_to_jiffies(100));
if (!ret)
dev_warn(nfc->dev, "write: wait dma done timeout.\n");
/*
 * Whether or not the DMA transfer completed, the driver still
 * needs to check the NFC's status register to see if the data
 * transfer was completed.
 */
ret = rk_nfc_wait_for_xfer_done(nfc);
dma_unmap_single(nfc->dev, dma_data, mtd->writesize,
DMA_TO_DEVICE);
dma_unmap_single(nfc->dev, dma_oob, ecc->steps * oob_step,
DMA_TO_DEVICE);
if (boot_rom_mode && rknand->boot_ecc != ecc->strength)
rk_nfc_hw_ecc_setup(chip, ecc->strength);
if (ret) {
dev_err(nfc->dev, "write: wait transfer done timeout.\n");
return -ETIMEDOUT;
}
return nand_prog_page_end_op(chip);
}
static int rk_nfc_write_oob(struct nand_chip *chip, int page)
{
return rk_nfc_write_page_hwecc(chip, NULL, 1, page);
}
static int rk_nfc_read_page_raw(struct nand_chip *chip, u8 *buf, int oob_on,
int page)
{
struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
struct rk_nfc *nfc = nand_get_controller_data(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int i, pages_per_blk;
pages_per_blk = mtd->erasesize / mtd->writesize;
if ((chip->options & NAND_IS_BOOT_MEDIUM) &&
(page < (pages_per_blk * rknand->boot_blks)) &&
rknand->boot_ecc != ecc->strength) {
/*
* There's currently no method to notify the MTD framework that
* a different ECC strength is in use for the boot blocks.
*/
return -EIO;
}
nand_read_page_op(chip, page, 0, NULL, 0);
rk_nfc_read_buf(nfc, nfc->page_buf, mtd->writesize + mtd->oobsize);
for (i = 0; i < ecc->steps; i++) {
/*
* The first four bytes of OOB are reserved for the
* boot ROM. In some debugging cases, such as with a read,
* erase and write back test, these 4 bytes also must be
* saved somewhere, otherwise this information will be
* lost during a write back.
*/
if (!i)
memcpy(rk_nfc_buf_to_oob_ptr(chip, ecc->steps - 1),
rk_nfc_oob_ptr(chip, i),
NFC_SYS_DATA_SIZE);
else
memcpy(rk_nfc_buf_to_oob_ptr(chip, i - 1),
rk_nfc_oob_ptr(chip, i),
NFC_SYS_DATA_SIZE);
/* Copy ECC data from the NFC buffer. */
memcpy(rk_nfc_buf_to_oob_ecc_ptr(chip, i),
rk_nfc_oob_ptr(chip, i) + NFC_SYS_DATA_SIZE,
ecc->bytes);
/* Copy data from the NFC buffer. */
if (buf)
memcpy(rk_nfc_buf_to_data_ptr(chip, buf, i),
rk_nfc_data_ptr(chip, i),
ecc->size);
}
return 0;
}
static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct rk_nfc *nfc = nand_get_controller_data(chip);
struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int oob_step = (ecc->bytes > 60) ? NFC_MAX_OOB_PER_STEP :
NFC_MIN_OOB_PER_STEP;
int pages_per_blk = mtd->erasesize / mtd->writesize;
dma_addr_t dma_data, dma_oob;
int ret = 0, i, cnt, boot_rom_mode = 0;
int max_bitflips = 0, bch_st, ecc_fail = 0;
u8 *oob;
u32 tmp;
nand_read_page_op(chip, page, 0, NULL, 0);
dma_data = dma_map_single(nfc->dev, nfc->page_buf,
mtd->writesize,
DMA_FROM_DEVICE);
dma_oob = dma_map_single(nfc->dev, nfc->oob_buf,
ecc->steps * oob_step,
DMA_FROM_DEVICE);
/*
* The first blocks (4, 8 or 16 depending on the device)
* are used by the boot ROM.
* Configure the ECC algorithm supported by the boot ROM.
*/
if ((page < (pages_per_blk * rknand->boot_blks)) &&
(chip->options & NAND_IS_BOOT_MEDIUM)) {
boot_rom_mode = 1;
if (rknand->boot_ecc != ecc->strength)
rk_nfc_hw_ecc_setup(chip, rknand->boot_ecc);
}
reinit_completion(&nfc->done);
writel(INT_DMA, nfc->regs + nfc->cfg->int_en_off);
rk_nfc_xfer_start(nfc, NFC_READ, ecc->steps, dma_data,
dma_oob);
ret = wait_for_completion_timeout(&nfc->done,
msecs_to_jiffies(100));
if (!ret)
dev_warn(nfc->dev, "read: wait dma done timeout.\n");
/*
 * Whether or not the DMA transfer completed, the driver still
 * needs to check the NFC's status register to see if the data
 * transfer was completed.
 */
ret = rk_nfc_wait_for_xfer_done(nfc);
dma_unmap_single(nfc->dev, dma_data, mtd->writesize,
DMA_FROM_DEVICE);
dma_unmap_single(nfc->dev, dma_oob, ecc->steps * oob_step,
DMA_FROM_DEVICE);
if (ret) {
ret = -ETIMEDOUT;
dev_err(nfc->dev, "read: wait transfer done timeout.\n");
goto timeout_err;
}
for (i = 0; i < ecc->steps; i++) {
if (!i)
oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE;
else
oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
if (nfc->cfg->type == NFC_V9)
tmp = nfc->oob_buf[i];
else
tmp = nfc->oob_buf[i * (oob_step / 4)];
*oob++ = (u8)tmp;
*oob++ = (u8)(tmp >> 8);
*oob++ = (u8)(tmp >> 16);
*oob++ = (u8)(tmp >> 24);
}
for (i = 0; i < (ecc->steps / 2); i++) {
bch_st = readl_relaxed(nfc->regs +
nfc->cfg->bch_st_off + i * 4);
if (bch_st & BIT(nfc->cfg->ecc0.err_flag_bit) ||
bch_st & BIT(nfc->cfg->ecc1.err_flag_bit)) {
mtd->ecc_stats.failed++;
ecc_fail = 1;
} else {
cnt = ECC_ERR_CNT(bch_st, nfc->cfg->ecc0);
mtd->ecc_stats.corrected += cnt;
max_bitflips = max_t(u32, max_bitflips, cnt);
cnt = ECC_ERR_CNT(bch_st, nfc->cfg->ecc1);
mtd->ecc_stats.corrected += cnt;
max_bitflips = max_t(u32, max_bitflips, cnt);
}
}
if (buf)
memcpy(buf, nfc->page_buf, mtd->writesize);
timeout_err:
if (boot_rom_mode && rknand->boot_ecc != ecc->strength)
rk_nfc_hw_ecc_setup(chip, ecc->strength);
if (ret)
return ret;
if (ecc_fail) {
dev_err(nfc->dev, "read page: %x ecc error!\n", page);
return 0;
}
return max_bitflips;
}
static int rk_nfc_read_oob(struct nand_chip *chip, int page)
{
return rk_nfc_read_page_hwecc(chip, NULL, 1, page);
}
static inline void rk_nfc_hw_init(struct rk_nfc *nfc)
{
/* Disable flash wp. */
writel(FMCTL_WP, nfc->regs + NFC_FMCTL);
/* Configure the default timing for 40ns at a 150 MHz NFC clock. */
writel(0x1081, nfc->regs + NFC_FMWAIT);
nfc->cur_timing = 0x1081;
/* Disable randomizer and DMA. */
writel(0, nfc->regs + nfc->cfg->randmz_off);
writel(0, nfc->regs + nfc->cfg->dma_cfg_off);
writel(FLCTL_RST, nfc->regs + nfc->cfg->flctl_off);
}
static irqreturn_t rk_nfc_irq(int irq, void *id)
{
struct rk_nfc *nfc = id;
u32 sta, ien;
sta = readl_relaxed(nfc->regs + nfc->cfg->int_st_off);
ien = readl_relaxed(nfc->regs + nfc->cfg->int_en_off);
if (!(sta & ien))
return IRQ_NONE;
writel(sta, nfc->regs + nfc->cfg->int_clr_off);
writel(~sta & ien, nfc->regs + nfc->cfg->int_en_off);
complete(&nfc->done);
return IRQ_HANDLED;
}
static int rk_nfc_enable_clks(struct device *dev, struct rk_nfc *nfc)
{
int ret;
if (!IS_ERR(nfc->nfc_clk)) {
ret = clk_prepare_enable(nfc->nfc_clk);
if (ret) {
dev_err(dev, "failed to enable NFC clk\n");
return ret;
}
}
ret = clk_prepare_enable(nfc->ahb_clk);
if (ret) {
dev_err(dev, "failed to enable ahb clk\n");
clk_disable_unprepare(nfc->nfc_clk);
return ret;
}
return 0;
}
static void rk_nfc_disable_clks(struct rk_nfc *nfc)
{
clk_disable_unprepare(nfc->nfc_clk);
clk_disable_unprepare(nfc->ahb_clk);
}
static int rk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oob_region)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
if (section)
return -ERANGE;
oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2;
oob_region->offset = 2;
return 0;
}
static int rk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oob_region)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
if (section)
return -ERANGE;
oob_region->length = mtd->oobsize - rknand->metadata_size;
oob_region->offset = rknand->metadata_size;
return 0;
}
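/*
 * Worked example (illustrative numbers): with 8 ECC steps,
 * metadata_size = 8 * NFC_SYS_DATA_SIZE = 32 bytes, so the free region
 * is offset 2, length 32 - 4 - 2 = 26, and the ECC region starts at
 * offset 32 and covers the rest of the OOB area.
 */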
static const struct mtd_ooblayout_ops rk_nfc_ooblayout_ops = {
.free = rk_nfc_ooblayout_free,
.ecc = rk_nfc_ooblayout_ecc,
};
static int rk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct rk_nfc *nfc = nand_get_controller_data(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
const u8 *strengths = nfc->cfg->ecc_strengths;
u8 max_strength, nfc_max_strength;
int i;
nfc_max_strength = nfc->cfg->ecc_strengths[0];
/* If the optional DT settings are not present or invalid, pick defaults. */
if (!ecc->size || !ecc->strength ||
ecc->strength > nfc_max_strength) {
chip->ecc.size = 1024;
ecc->steps = mtd->writesize / ecc->size;
/*
* HW ECC strength is always specified per 1024-byte block; the first
* 4 OOB bytes of each step are reserved for sys data.
*/
max_strength = ((mtd->oobsize / ecc->steps) - 4) * 8 /
fls(8 * 1024);
if (max_strength > nfc_max_strength)
max_strength = nfc_max_strength;
for (i = 0; i < 4; i++) {
if (max_strength >= strengths[i])
break;
}
if (i >= 4) {
dev_err(nfc->dev, "unsupported ECC strength\n");
return -EOPNOTSUPP;
}
ecc->strength = strengths[i];
}
ecc->steps = mtd->writesize / ecc->size;
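/*
 * E.g. (illustrative) strength 40 over a 1024-byte step costs
 * DIV_ROUND_UP(40 * fls(8192), 8) = DIV_ROUND_UP(40 * 14, 8) = 70
 * ECC bytes per step.
 */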
ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * chip->ecc.size), 8);
return 0;
}
static int rk_nfc_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct device *dev = mtd->dev.parent;
struct rk_nfc *nfc = nand_get_controller_data(chip);
struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int new_page_len, new_oob_len;
void *buf;
int ret;
if (chip->options & NAND_BUSWIDTH_16) {
dev_err(dev, "16 bits bus width not supported");
return -EINVAL;
}
if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return 0;
ret = rk_nfc_ecc_init(dev, mtd);
if (ret)
return ret;
rknand->metadata_size = NFC_SYS_DATA_SIZE * ecc->steps;
if (rknand->metadata_size < NFC_SYS_DATA_SIZE + 2) {
dev_err(dev,
"driver needs at least %d bytes of meta data\n",
NFC_SYS_DATA_SIZE + 2);
return -EIO;
}
/* Check buffer first, avoid duplicate alloc buffer. */
new_page_len = mtd->writesize + mtd->oobsize;
if (nfc->page_buf && new_page_len > nfc->page_buf_size) {
buf = krealloc(nfc->page_buf, new_page_len,
GFP_KERNEL | GFP_DMA);
if (!buf)
return -ENOMEM;
nfc->page_buf = buf;
nfc->page_buf_size = new_page_len;
}
new_oob_len = ecc->steps * NFC_MAX_OOB_PER_STEP;
if (nfc->oob_buf && new_oob_len > nfc->oob_buf_size) {
buf = krealloc(nfc->oob_buf, new_oob_len,
GFP_KERNEL | GFP_DMA);
if (!buf) {
kfree(nfc->page_buf);
nfc->page_buf = NULL;
return -ENOMEM;
}
nfc->oob_buf = buf;
nfc->oob_buf_size = new_oob_len;
}
if (!nfc->page_buf) {
nfc->page_buf = kzalloc(new_page_len, GFP_KERNEL | GFP_DMA);
if (!nfc->page_buf)
return -ENOMEM;
nfc->page_buf_size = new_page_len;
}
if (!nfc->oob_buf) {
nfc->oob_buf = kzalloc(new_oob_len, GFP_KERNEL | GFP_DMA);
if (!nfc->oob_buf) {
kfree(nfc->page_buf);
nfc->page_buf = NULL;
return -ENOMEM;
}
nfc->oob_buf_size = new_oob_len;
}
chip->ecc.write_page_raw = rk_nfc_write_page_raw;
chip->ecc.write_page = rk_nfc_write_page_hwecc;
chip->ecc.write_oob = rk_nfc_write_oob;
chip->ecc.read_page_raw = rk_nfc_read_page_raw;
chip->ecc.read_page = rk_nfc_read_page_hwecc;
chip->ecc.read_oob = rk_nfc_read_oob;
return 0;
}
static const struct nand_controller_ops rk_nfc_controller_ops = {
.attach_chip = rk_nfc_attach_chip,
.exec_op = rk_nfc_exec_op,
.setup_interface = rk_nfc_setup_interface,
};
static int rk_nfc_nand_chip_init(struct device *dev, struct rk_nfc *nfc,
struct device_node *np)
{
struct rk_nfc_nand_chip *rknand;
struct nand_chip *chip;
struct mtd_info *mtd;
int nsels;
u32 tmp;
int ret;
int i;
if (!of_get_property(np, "reg", &nsels))
return -ENODEV;
nsels /= sizeof(u32);
if (!nsels || nsels > NFC_MAX_NSELS) {
dev_err(dev, "invalid reg property size %d\n", nsels);
return -EINVAL;
}
rknand = devm_kzalloc(dev, sizeof(*rknand) + nsels * sizeof(u8),
GFP_KERNEL);
if (!rknand)
return -ENOMEM;
rknand->nsels = nsels;
for (i = 0; i < nsels; i++) {
ret = of_property_read_u32_index(np, "reg", i, &tmp);
if (ret) {
dev_err(dev, "reg property failure : %d\n", ret);
return ret;
}
if (tmp >= NFC_MAX_NSELS) {
dev_err(dev, "invalid CS: %u\n", tmp);
return -EINVAL;
}
if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
dev_err(dev, "CS %u already assigned\n", tmp);
return -EINVAL;
}
rknand->sels[i] = tmp;
}
chip = &rknand->chip;
chip->controller = &nfc->controller;
nand_set_flash_node(chip, np);
nand_set_controller_data(chip, nfc);
chip->options |= NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE;
chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
/* Set default mode in case dt entry is missing. */
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
mtd = nand_to_mtd(chip);
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
if (!mtd->name) {
dev_err(nfc->dev, "NAND label property is mandatory\n");
return -EINVAL;
}
mtd_set_ooblayout(mtd, &rk_nfc_ooblayout_ops);
rk_nfc_hw_init(nfc);
ret = nand_scan(chip, nsels);
if (ret)
return ret;
if (chip->options & NAND_IS_BOOT_MEDIUM) {
ret = of_property_read_u32(np, "rockchip,boot-blks", &tmp);
rknand->boot_blks = ret ? 0 : tmp;
ret = of_property_read_u32(np, "rockchip,boot-ecc-strength",
&tmp);
rknand->boot_ecc = ret ? chip->ecc.strength : tmp;
}
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "MTD parse partition error\n");
nand_cleanup(chip);
return ret;
}
list_add_tail(&rknand->node, &nfc->chips);
return 0;
}
static void rk_nfc_chips_cleanup(struct rk_nfc *nfc)
{
struct rk_nfc_nand_chip *rknand, *tmp;
struct nand_chip *chip;
int ret;
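
	/* Unregister and release every chip hanging off this controller. */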
list_for_each_entry_safe(rknand, tmp, &nfc->chips, node) {
chip = &rknand->chip;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
list_del(&rknand->node);
}
}
static int rk_nfc_nand_chips_init(struct device *dev, struct rk_nfc *nfc)
{
struct device_node *np = dev->of_node, *nand_np;
int nchips = of_get_child_count(np);
int ret;
if (!nchips || nchips > NFC_MAX_NSELS) {
dev_err(nfc->dev, "incorrect number of NAND chips (%d)\n",
nchips);
return -EINVAL;
}
for_each_child_of_node(np, nand_np) {
ret = rk_nfc_nand_chip_init(dev, nfc, nand_np);
if (ret) {
of_node_put(nand_np);
rk_nfc_chips_cleanup(nfc);
return ret;
}
}
return 0;
}
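
/*
 * Register layout and ECC capability tables for the three NFC IP
 * revisions this driver handles; rk_nfc_id_table below maps the
 * compatibles (rk2928 -> v6, rv1108 -> v8, px30 -> v9) onto them.
 */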
static struct nfc_cfg nfc_v6_cfg = {
.type = NFC_V6,
.ecc_strengths = {60, 40, 24, 16},
.ecc_cfgs = {
0x00040011, 0x00040001, 0x00000011, 0x00000001,
},
.flctl_off = 0x08,
.bchctl_off = 0x0C,
.dma_cfg_off = 0x10,
.dma_data_buf_off = 0x14,
.dma_oob_buf_off = 0x18,
.dma_st_off = 0x1C,
.bch_st_off = 0x20,
.randmz_off = 0x150,
.int_en_off = 0x16C,
.int_clr_off = 0x170,
.int_st_off = 0x174,
.oob0_off = 0x200,
.oob1_off = 0x230,
.ecc0 = {
.err_flag_bit = 2,
.low = 3,
.low_mask = 0x1F,
.low_bn = 5,
.high = 27,
.high_mask = 0x1,
},
.ecc1 = {
.err_flag_bit = 15,
.low = 16,
.low_mask = 0x1F,
.low_bn = 5,
.high = 29,
.high_mask = 0x1,
},
};
static struct nfc_cfg nfc_v8_cfg = {
.type = NFC_V8,
.ecc_strengths = {16, 16, 16, 16},
.ecc_cfgs = {
0x00000001, 0x00000001, 0x00000001, 0x00000001,
},
.flctl_off = 0x08,
.bchctl_off = 0x0C,
.dma_cfg_off = 0x10,
.dma_data_buf_off = 0x14,
.dma_oob_buf_off = 0x18,
.dma_st_off = 0x1C,
.bch_st_off = 0x20,
.randmz_off = 0x150,
.int_en_off = 0x16C,
.int_clr_off = 0x170,
.int_st_off = 0x174,
.oob0_off = 0x200,
.oob1_off = 0x230,
.ecc0 = {
.err_flag_bit = 2,
.low = 3,
.low_mask = 0x1F,
.low_bn = 5,
.high = 27,
.high_mask = 0x1,
},
.ecc1 = {
.err_flag_bit = 15,
.low = 16,
.low_mask = 0x1F,
.low_bn = 5,
.high = 29,
.high_mask = 0x1,
},
};
static struct nfc_cfg nfc_v9_cfg = {
.type = NFC_V9,
.ecc_strengths = {70, 60, 40, 16},
.ecc_cfgs = {
0x00000001, 0x06000001, 0x04000001, 0x02000001,
},
.flctl_off = 0x10,
.bchctl_off = 0x20,
.dma_cfg_off = 0x30,
.dma_data_buf_off = 0x34,
.dma_oob_buf_off = 0x38,
.dma_st_off = 0x3C,
.bch_st_off = 0x150,
.randmz_off = 0x208,
.int_en_off = 0x120,
.int_clr_off = 0x124,
.int_st_off = 0x128,
.oob0_off = 0x200,
.oob1_off = 0x204,
.ecc0 = {
.err_flag_bit = 2,
.low = 3,
.low_mask = 0x7F,
.low_bn = 7,
.high = 0,
.high_mask = 0x0,
},
.ecc1 = {
.err_flag_bit = 18,
.low = 19,
.low_mask = 0x7F,
.low_bn = 7,
.high = 0,
.high_mask = 0x0,
},
};
static const struct of_device_id rk_nfc_id_table[] = {
{
.compatible = "rockchip,px30-nfc",
.data = &nfc_v9_cfg
},
{
.compatible = "rockchip,rk2928-nfc",
.data = &nfc_v6_cfg
},
{
.compatible = "rockchip,rv1108-nfc",
.data = &nfc_v8_cfg
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rk_nfc_id_table);
static int rk_nfc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rk_nfc *nfc;
int ret, irq;
nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
if (!nfc)
return -ENOMEM;
nand_controller_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
nfc->controller.ops = &rk_nfc_controller_ops;
nfc->cfg = of_device_get_match_data(dev);
nfc->dev = dev;
init_completion(&nfc->done);
nfc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nfc->regs)) {
ret = PTR_ERR(nfc->regs);
goto release_nfc;
}
nfc->nfc_clk = devm_clk_get(dev, "nfc");
	if (IS_ERR(nfc->nfc_clk)) {
		/* Some earlier models, such as rk3066, have no NFC clk. */
		dev_dbg(dev, "no NFC clk\n");
	}
nfc->ahb_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(nfc->ahb_clk)) {
dev_err(dev, "no ahb clk\n");
ret = PTR_ERR(nfc->ahb_clk);
goto release_nfc;
}
ret = rk_nfc_enable_clks(dev, nfc);
if (ret)
goto release_nfc;
irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto clk_disable;
	}
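
	/* Mask all NFC interrupts before the handler is installed. */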
writel(0, nfc->regs + nfc->cfg->int_en_off);
ret = devm_request_irq(dev, irq, rk_nfc_irq, 0x0, "rk-nand", nfc);
if (ret) {
dev_err(dev, "failed to request NFC irq\n");
goto clk_disable;
}
platform_set_drvdata(pdev, nfc);
ret = rk_nfc_nand_chips_init(dev, nfc);
if (ret) {
dev_err(dev, "failed to init NAND chips\n");
goto clk_disable;
}
return 0;
clk_disable:
rk_nfc_disable_clks(nfc);
release_nfc:
return ret;
}
static void rk_nfc_remove(struct platform_device *pdev)
{
struct rk_nfc *nfc = platform_get_drvdata(pdev);
kfree(nfc->page_buf);
kfree(nfc->oob_buf);
rk_nfc_chips_cleanup(nfc);
rk_nfc_disable_clks(nfc);
}
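
/*
 * System sleep: gate the controller clocks on suspend; re-enable them
 * and reset every chip on resume in case VCC was cut.
 */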
static int __maybe_unused rk_nfc_suspend(struct device *dev)
{
struct rk_nfc *nfc = dev_get_drvdata(dev);
rk_nfc_disable_clks(nfc);
return 0;
}
static int __maybe_unused rk_nfc_resume(struct device *dev)
{
struct rk_nfc *nfc = dev_get_drvdata(dev);
struct rk_nfc_nand_chip *rknand;
struct nand_chip *chip;
int ret;
u32 i;
ret = rk_nfc_enable_clks(dev, nfc);
if (ret)
return ret;
/* Reset NAND chip if VCC was powered off. */
list_for_each_entry(rknand, &nfc->chips, node) {
chip = &rknand->chip;
for (i = 0; i < rknand->nsels; i++)
nand_reset(chip, i);
}
return 0;
}
static const struct dev_pm_ops rk_nfc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(rk_nfc_suspend, rk_nfc_resume)
};
static struct platform_driver rk_nfc_driver = {
.probe = rk_nfc_probe,
.remove_new = rk_nfc_remove,
.driver = {
.name = "rockchip-nfc",
.of_match_table = rk_nfc_id_table,
.pm = &rk_nfc_pm_ops,
},
};
module_platform_driver(rk_nfc_driver);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_AUTHOR("Yifeng Zhao <[email protected]>");
MODULE_DESCRIPTION("Rockchip Nand Flash Controller Driver");
MODULE_ALIAS("platform:rockchip-nand-controller");
| linux-master | drivers/mtd/nand/raw/rockchip-nand-controller.c |