Merge branch 'master' of git://git.denx.de/u-boot-spi

This is the PR for the SPI-NAND changes, along with a few SPI changes.

[trini: Re-sync changes for ls1012afrwy_qspi*_defconfig]
Signed-off-by: Tom Rini <trini@konsulko.com>
commit 592cd5defd
Author: Tom Rini <trini@konsulko.com>
Date: 2018-10-02 13:02:22 -04:00

104 changed files with 6002 additions and 1044 deletions


@@ -299,7 +299,7 @@ F: drivers/i2c/i2c-cdns.c
F: drivers/i2c/muxes/pca954x.c
F: drivers/i2c/zynq_i2c.c
F: drivers/mmc/zynq_sdhci.c
F: drivers/mtd/nand/zynq_nand.c
F: drivers/mtd/nand/raw/zynq_nand.c
F: drivers/net/phy/xilinx_phy.c
F: drivers/net/zynq_gem.c
F: drivers/serial/serial_zynq.c
@@ -323,7 +323,7 @@ F: drivers/i2c/i2c-cdns.c
F: drivers/i2c/muxes/pca954x.c
F: drivers/i2c/zynq_i2c.c
F: drivers/mmc/zynq_sdhci.c
F: drivers/mtd/nand/zynq_nand.c
F: drivers/mtd/nand/raw/zynq_nand.c
F: drivers/net/phy/xilinx_phy.c
F: drivers/net/zynq_gem.c
F: drivers/serial/serial_zynq.c
@@ -478,7 +478,7 @@ NAND FLASH
#M: Scott Wood <oss@buserror.net>
S: Orphaned (Since 2018-07)
T: git git://git.denx.de/u-boot-nand-flash.git
F: drivers/mtd/nand/
F: drivers/mtd/nand/raw/
NDS32
M: Macpaul Lin <macpaul@andestech.com>


@@ -689,7 +689,7 @@ libs-y += drivers/dma/
libs-y += drivers/gpio/
libs-y += drivers/i2c/
libs-y += drivers/mtd/
libs-$(CONFIG_CMD_NAND) += drivers/mtd/nand/
libs-$(CONFIG_CMD_NAND) += drivers/mtd/nand/raw/
libs-y += drivers/mtd/onenand/
libs-$(CONFIG_CMD_UBI) += drivers/mtd/ubi/
libs-y += drivers/mtd/spi/

README

@@ -3237,8 +3237,8 @@ Low Level (hardware related) configuration options:
a 16 bit bus.
Not all NAND drivers use this symbol.
Example of drivers that use it:
- drivers/mtd/nand/ndfc.c
- drivers/mtd/nand/mxc_nand.c
- drivers/mtd/nand/raw/ndfc.c
- drivers/mtd/nand/raw/mxc_nand.c
- CONFIG_SYS_NDFC_EBC0_CFG
Sets the EBC0_CFG register for the NDFC. If not defined
@@ -3355,7 +3355,7 @@ Low Level (hardware related) configuration options:
- CONFIG_SYS_NAND_NO_SUBPAGE_WRITE
Option to disable subpage write in NAND driver
driver that uses this:
drivers/mtd/nand/davinci_nand.c
drivers/mtd/nand/raw/davinci_nand.c
Freescale QE/FMAN Firmware Support:
-----------------------------------


@@ -12,7 +12,7 @@
#include <stdio.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <../drivers/mtd/nand/denali.h>
#include <../drivers/mtd/nand/raw/denali.h>
#include "init.h"


@@ -866,6 +866,12 @@ config CMD_MMC_SWRITE
Enable support for the "mmc swrite" command to write Android sparse
images to eMMC.
config CMD_MTD
bool "mtd"
select MTD_PARTITIONS
help
Support for MTD commands.
config CMD_NAND
bool "nand"
default y if NAND_SUNXI
@@ -1714,18 +1720,22 @@ config CMD_MTDPARTS
bool "MTD partition support"
select MTD_DEVICE if (CMD_NAND || NAND)
help
MTD partition support
MTD partitioning tool support.
It is strongly encouraged to avoid using this command
along with 'sf', 'nand' or 'onenand'. Partitions can still be
declared in the mtdparts environment variable, but it is
better to use the MTD stack and the 'mtd' command instead.
config MTDIDS_DEFAULT
string "Default MTD IDs"
depends on CMD_MTDPARTS || CMD_NAND || CMD_FLASH
depends on CMD_MTD || CMD_MTDPARTS || CMD_NAND || CMD_FLASH
help
Defines a default MTD IDs list for use with MTD partitions in the
Linux MTD command line partitions format.
config MTDPARTS_DEFAULT
string "Default MTD partition scheme"
depends on CMD_MTDPARTS || CMD_NAND || CMD_FLASH
depends on CMD_MTD || CMD_MTDPARTS || CMD_NAND || CMD_FLASH
help
Defines a default MTD partitioning scheme in the Linux MTD command
line partitions format
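
As an illustration, a board might set these defaults as follows (the device and partition names are hypothetical; the syntax is the Linux mtdparts format documented in cmd/mtdparts.c):

	CONFIG_MTDIDS_DEFAULT="nand0=nand.0"
	CONFIG_MTDPARTS_DEFAULT="mtdparts=nand.0:1m(u-boot)ro,-(ubi)"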
@@ -1855,6 +1865,8 @@ config CMD_UBI
capabilities. Please, consult the MTD web site for more details
(www.linux-mtd.infradead.org). Activate this option if you want
to use U-Boot UBI commands.
It is also strongly encouraged to enable CONFIG_MTD to get full
partition support.
config CMD_UBIFS
tristate "Enable UBIFS - Unsorted block images filesystem commands"


@@ -93,6 +93,7 @@ obj-$(CONFIG_CMD_MISC) += misc.o
obj-$(CONFIG_CMD_MMC) += mmc.o
obj-$(CONFIG_CMD_MMC_SPI) += mmc_spi.o
obj-$(CONFIG_MP) += mp.o
obj-$(CONFIG_CMD_MTD) += mtd.o
obj-$(CONFIG_CMD_MTDPARTS) += mtdparts.o
obj-$(CONFIG_CMD_NAND) += nand.o
obj-$(CONFIG_CMD_NET) += net.o

cmd/mtd.c (new file)

@@ -0,0 +1,473 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* mtd.c
*
* Generic command to handle basic operations on any memory device.
*
* Copyright: Bootlin, 2018
* Author: Miquèl Raynal <miquel.raynal@bootlin.com>
*/
#include <command.h>
#include <common.h>
#include <console.h>
#include <malloc.h>
#include <mapmem.h>
#include <mtd.h>
static uint mtd_len_to_pages(struct mtd_info *mtd, u64 len)
{
do_div(len, mtd->writesize);
return len;
}
static bool mtd_is_aligned_with_min_io_size(struct mtd_info *mtd, u64 size)
{
return !do_div(size, mtd->writesize);
}
static bool mtd_is_aligned_with_block_size(struct mtd_info *mtd, u64 size)
{
return !do_div(size, mtd->erasesize);
}
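
For instance, on a hypothetical device with a 2KiB page and a 128KiB eraseblock, mtd_is_aligned_with_min_io_size() accepts offset 0x800 but rejects 0x900, and mtd_is_aligned_with_block_size() accepts 0x20000 but rejects 0x10800.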
static void mtd_dump_buf(const u8 *buf, uint len, uint offset)
{
int i, j;
for (i = 0; i < len; ) {
printf("0x%08x:\t", offset + i);
for (j = 0; j < 8; j++)
printf("%02x ", buf[i + j]);
printf(" ");
i += 8;
for (j = 0; j < 8; j++)
printf("%02x ", buf[i + j]);
printf("\n");
i += 8;
}
}
static void mtd_dump_device_buf(struct mtd_info *mtd, u64 start_off,
const u8 *buf, u64 len, bool woob)
{
bool has_pages = mtd->type == MTD_NANDFLASH ||
mtd->type == MTD_MLCNANDFLASH;
int npages = mtd_len_to_pages(mtd, len);
uint page;
if (has_pages) {
for (page = 0; page < npages; page++) {
u64 data_off = page * mtd->writesize;
printf("\nDump %d data bytes from 0x%08llx:\n",
mtd->writesize, start_off + data_off);
mtd_dump_buf(&buf[data_off],
mtd->writesize, start_off + data_off);
if (woob) {
u64 oob_off = page * mtd->oobsize;
printf("Dump %d OOB bytes from page at 0x%08llx:\n",
mtd->oobsize, start_off + data_off);
mtd_dump_buf(&buf[len + oob_off],
mtd->oobsize, 0);
}
}
} else {
printf("\nDump %lld data bytes from 0x%llx:\n",
len, start_off);
mtd_dump_buf(buf, len, start_off);
}
}
static void mtd_show_parts(struct mtd_info *mtd, int level)
{
struct mtd_info *part;
int i;
list_for_each_entry(part, &mtd->partitions, node) {
for (i = 0; i < level; i++)
printf("\t");
printf(" - 0x%012llx-0x%012llx : \"%s\"\n",
part->offset, part->offset + part->size, part->name);
mtd_show_parts(part, level + 1);
}
}
static void mtd_show_device(struct mtd_info *mtd)
{
/* Device */
printf("* %s\n", mtd->name);
#if defined(CONFIG_DM)
if (mtd->dev) {
printf(" - device: %s\n", mtd->dev->name);
printf(" - parent: %s\n", mtd->dev->parent->name);
printf(" - driver: %s\n", mtd->dev->driver->name);
}
#endif
/* MTD device information */
printf(" - type: ");
switch (mtd->type) {
case MTD_RAM:
printf("RAM\n");
break;
case MTD_ROM:
printf("ROM\n");
break;
case MTD_NORFLASH:
printf("NOR flash\n");
break;
case MTD_NANDFLASH:
printf("NAND flash\n");
break;
case MTD_DATAFLASH:
printf("Data flash\n");
break;
case MTD_UBIVOLUME:
printf("UBI volume\n");
break;
case MTD_MLCNANDFLASH:
printf("MLC NAND flash\n");
break;
case MTD_ABSENT:
default:
printf("Unknown\n");
break;
}
printf(" - block size: 0x%x bytes\n", mtd->erasesize);
printf(" - min I/O: 0x%x bytes\n", mtd->writesize);
if (mtd->oobsize) {
printf(" - OOB size: %u bytes\n", mtd->oobsize);
printf(" - OOB available: %u bytes\n", mtd->oobavail);
}
if (mtd->ecc_strength) {
printf(" - ECC strength: %u bits\n", mtd->ecc_strength);
printf(" - ECC step size: %u bytes\n", mtd->ecc_step_size);
printf(" - bitflip threshold: %u bits\n",
mtd->bitflip_threshold);
}
printf(" - 0x%012llx-0x%012llx : \"%s\"\n",
mtd->offset, mtd->offset + mtd->size, mtd->name);
/* MTD partitions, if any */
mtd_show_parts(mtd, 1);
}
/* Logic taken from fs/ubifs/recovery.c:is_empty() */
static bool mtd_oob_write_is_empty(struct mtd_oob_ops *op)
{
int i;
for (i = 0; i < op->len; i++)
if (op->datbuf[i] != 0xff)
return false;
for (i = 0; i < op->ooblen; i++)
if (op->oobbuf[i] != 0xff)
return false;
return true;
}
static int do_mtd_list(void)
{
struct mtd_info *mtd;
int dev_nb = 0;
/* Ensure all devices (and their partitions) are probed */
mtd_probe_devices();
printf("List of MTD devices:\n");
mtd_for_each_device(mtd) {
if (!mtd_is_partition(mtd))
mtd_show_device(mtd);
dev_nb++;
}
if (!dev_nb) {
printf("No MTD device found\n");
return CMD_RET_FAILURE;
}
return CMD_RET_SUCCESS;
}
static int mtd_special_write_oob(struct mtd_info *mtd, u64 off,
struct mtd_oob_ops *io_op,
bool write_empty_pages, bool woob)
{
int ret = 0;
/*
* By default, do not write an empty page.
* Skip it by simulating a successful write.
*/
if (!write_empty_pages && mtd_oob_write_is_empty(io_op)) {
io_op->retlen = mtd->writesize;
io_op->oobretlen = woob ? mtd->oobsize : 0;
} else {
ret = mtd_write_oob(mtd, off, io_op);
}
return ret;
}
static int do_mtd(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
struct mtd_info *mtd;
const char *cmd;
char *mtd_name;
/* All MTD commands need at least two arguments */
if (argc < 2)
return CMD_RET_USAGE;
/* Parse the command name and its optional suffixes */
cmd = argv[1];
/* List the MTD devices if that is what the user wants */
if (strcmp(cmd, "list") == 0)
return do_mtd_list();
/*
* The remaining commands also require at least a device ID.
* Check that the selected device is valid and ensure it is probed.
*/
if (argc < 3)
return CMD_RET_USAGE;
mtd_name = argv[2];
mtd_probe_devices();
mtd = get_mtd_device_nm(mtd_name);
if (IS_ERR_OR_NULL(mtd)) {
printf("MTD device %s not found, ret %ld\n",
mtd_name, PTR_ERR(mtd));
return CMD_RET_FAILURE;
}
put_mtd_device(mtd);
argc -= 3;
argv += 3;
/* Do the parsing */
if (!strncmp(cmd, "read", 4) || !strncmp(cmd, "dump", 4) ||
!strncmp(cmd, "write", 5)) {
bool has_pages = mtd->type == MTD_NANDFLASH ||
mtd->type == MTD_MLCNANDFLASH;
bool dump, read, raw, woob, write_empty_pages;
struct mtd_oob_ops io_op = {};
uint user_addr = 0, npages;
u64 start_off, off, len, remaining, default_len;
u32 oob_len;
u8 *buf;
int ret;
dump = !strncmp(cmd, "dump", 4);
read = dump || !strncmp(cmd, "read", 4);
raw = strstr(cmd, ".raw");
woob = strstr(cmd, ".oob");
write_empty_pages = !has_pages || strstr(cmd, ".dontskipff");
if (!dump) {
if (!argc)
return CMD_RET_USAGE;
user_addr = simple_strtoul(argv[0], NULL, 16);
argc--;
argv++;
}
start_off = argc > 0 ? simple_strtoul(argv[0], NULL, 16) : 0;
if (!mtd_is_aligned_with_min_io_size(mtd, start_off)) {
printf("Offset not aligned with a page (0x%x)\n",
mtd->writesize);
return CMD_RET_FAILURE;
}
default_len = dump ? mtd->writesize : mtd->size;
len = argc > 1 ? simple_strtoul(argv[1], NULL, 16) :
default_len;
if (!mtd_is_aligned_with_min_io_size(mtd, len)) {
len = round_up(len, mtd->writesize);
printf("Size not on a page boundary (0x%x), rounding to 0x%llx\n",
mtd->writesize, len);
}
remaining = len;
npages = mtd_len_to_pages(mtd, len);
oob_len = woob ? npages * mtd->oobsize : 0;
if (dump)
buf = kmalloc(len + oob_len, GFP_KERNEL);
else
buf = map_sysmem(user_addr, 0);
if (!buf) {
printf("Could not map/allocate the user buffer\n");
return CMD_RET_FAILURE;
}
if (has_pages)
printf("%s %lld byte(s) (%d page(s)) at offset 0x%08llx%s%s%s\n",
read ? "Reading" : "Writing", len, npages, start_off,
raw ? " [raw]" : "", woob ? " [oob]" : "",
!read && write_empty_pages ? " [dontskipff]" : "");
else
printf("%s %lld byte(s) at offset 0x%08llx\n",
read ? "Reading" : "Writing", len, start_off);
io_op.mode = raw ? MTD_OPS_RAW : MTD_OPS_AUTO_OOB;
io_op.len = has_pages ? mtd->writesize : len;
io_op.ooblen = woob ? mtd->oobsize : 0;
io_op.datbuf = buf;
io_op.oobbuf = woob ? &buf[len] : NULL;
/* Search for the first good block after the given offset */
off = start_off;
while (mtd_block_isbad(mtd, off))
off += mtd->erasesize;
/* Loop over the pages to do the actual read/write */
while (remaining) {
/* Skip the block if it is bad */
if (mtd_is_aligned_with_block_size(mtd, off) &&
mtd_block_isbad(mtd, off)) {
off += mtd->erasesize;
continue;
}
if (read)
ret = mtd_read_oob(mtd, off, &io_op);
else
ret = mtd_special_write_oob(mtd, off, &io_op,
write_empty_pages,
woob);
if (ret) {
printf("Failure while %s at offset 0x%llx\n",
read ? "reading" : "writing", off);
return CMD_RET_FAILURE;
}
off += io_op.retlen;
remaining -= io_op.retlen;
io_op.datbuf += io_op.retlen;
io_op.oobbuf += io_op.oobretlen;
}
if (!ret && dump)
mtd_dump_device_buf(mtd, start_off, buf, len, woob);
if (dump)
kfree(buf);
else
unmap_sysmem(buf);
if (ret) {
printf("%s on %s failed with error %d\n",
read ? "Read" : "Write", mtd->name, ret);
return CMD_RET_FAILURE;
}
} else if (!strcmp(cmd, "erase")) {
bool scrub = strstr(cmd, ".dontskipbad");
struct erase_info erase_op = {};
u64 off, len;
int ret;
off = argc > 0 ? simple_strtoul(argv[0], NULL, 16) : 0;
len = argc > 1 ? simple_strtoul(argv[1], NULL, 16) : mtd->size;
if (!mtd_is_aligned_with_block_size(mtd, off)) {
printf("Offset not aligned with a block (0x%x)\n",
mtd->erasesize);
return CMD_RET_FAILURE;
}
if (!mtd_is_aligned_with_block_size(mtd, len)) {
printf("Size not a multiple of a block (0x%x)\n",
mtd->erasesize);
return CMD_RET_FAILURE;
}
printf("Erasing 0x%08llx ... 0x%08llx (%d eraseblock(s))\n",
off, off + len - 1, mtd_div_by_eb(len, mtd));
erase_op.mtd = mtd;
erase_op.addr = off;
erase_op.len = len;
erase_op.scrub = scrub;
while (erase_op.len) {
ret = mtd_erase(mtd, &erase_op);
/* Abort if it's not a bad block error */
if (ret != -EIO)
break;
printf("Skipping bad block at 0x%08llx\n",
erase_op.fail_addr);
/* Skip bad block and continue behind it */
erase_op.len -= erase_op.fail_addr - erase_op.addr;
erase_op.len -= mtd->erasesize;
erase_op.addr = erase_op.fail_addr + mtd->erasesize;
}
if (ret && ret != -EIO)
return CMD_RET_FAILURE;
} else if (!strcmp(cmd, "bad")) {
loff_t off;
if (!mtd_can_have_bb(mtd)) {
printf("Only NAND-based devices can have bad blocks\n");
return CMD_RET_SUCCESS;
}
printf("MTD device %s bad blocks list:\n", mtd->name);
for (off = 0; off < mtd->size; off += mtd->erasesize)
if (mtd_block_isbad(mtd, off))
printf("\t0x%08llx\n", off);
} else {
return CMD_RET_USAGE;
}
return CMD_RET_SUCCESS;
}
static char mtd_help_text[] =
#ifdef CONFIG_SYS_LONGHELP
"- generic operations on memory technology devices\n\n"
"mtd list\n"
"mtd read[.raw][.oob] <name> <addr> [<off> [<size>]]\n"
"mtd dump[.raw][.oob] <name> [<off> [<size>]]\n"
"mtd write[.raw][.oob][.dontskipff] <name> <addr> [<off> [<size>]]\n"
"mtd erase[.dontskipbad] <name> [<off> [<size>]]\n"
"\n"
"Specific functions:\n"
"mtd bad <name>\n"
"\n"
"With:\n"
"\t<name>: NAND partition/chip name\n"
"\t<addr>: user address from/to which data will be retrieved/stored\n"
"\t<off>: offset in <name> in bytes (default: start of the part)\n"
"\t\t* must be block-aligned for erase\n"
"\t\t* must be page-aligned otherwise\n"
"\t<size>: length of the operation in bytes (default: the entire device)\n"
"\t\t* must be a multiple of a block for erase\n"
"\t\t* must be a multiple of a page otherwise (special case: default is a page with dump)\n"
"\n"
"The .dontskipff option forces writing empty pages, don't use it if unsure.\n"
#endif
"";
U_BOOT_CMD(mtd, 10, 1, do_mtd, "MTD utils", mtd_help_text);
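
The help text above translates into sessions like the following (a hypothetical console transcript; the device name, load address and sizes are examples only):

	=> mtd list
	=> mtd read spi-nand0 0x80000000 0x0 0x40000
	=> mtd erase spi-nand0 0x0 0x40000
	=> mtd write spi-nand0 0x80000000 0x0 0x40000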


@@ -37,14 +37,14 @@
* mtdids=<idmap>[,<idmap>,...]
*
* <idmap> := <dev-id>=<mtd-id>
* <dev-id> := 'nand'|'nor'|'onenand'<dev-num>
* <dev-id> := 'nand'|'nor'|'onenand'|'spi-nand'<dev-num>
* <dev-num> := mtd device number, 0...
* <mtd-id> := unique device tag used by linux kernel to find mtd device (mtd->name)
*
*
* 'mtdparts' - partition list
*
* mtdparts=mtdparts=<mtd-def>[;<mtd-def>...]
* mtdparts=[mtdparts=]<mtd-def>[;<mtd-def>...]
*
* <mtd-def> := <mtd-id>:<part-def>[,<part-def>...]
* <mtd-id> := unique device tag used by linux kernel to find mtd device (mtd->name)
@@ -62,11 +62,11 @@
*
* 1 NOR Flash, with 1 single writable partition:
* mtdids=nor0=edb7312-nor
* mtdparts=mtdparts=edb7312-nor:-
* mtdparts=[mtdparts=]edb7312-nor:-
*
* 1 NOR Flash with 2 partitions, 1 NAND with one
* mtdids=nor0=edb7312-nor,nand0=edb7312-nand
* mtdparts=mtdparts=edb7312-nor:256k(ARMboot)ro,-(root);edb7312-nand:-(home)
* mtdparts=[mtdparts=]edb7312-nor:256k(ARMboot)ro,-(root);edb7312-nand:-(home)
*
*/
@@ -177,13 +177,16 @@ static u64 memsize_parse (const char *const ptr, const char **retptr)
case 'G':
case 'g':
ret <<= 10;
/* Fallthrough */
case 'M':
case 'm':
ret <<= 10;
/* Fallthrough */
case 'K':
case 'k':
ret <<= 10;
(*retptr)++;
/* Fallthrough */
default:
break;
}
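
With these fall-throughs, a 'g' suffix cascades through all three shifts ('g' <<10, then 'm' <<10, then 'k' <<10), so a hypothetical memsize_parse("1g", &p) returns 1 << 30 while "512k" returns 512 << 10.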
@@ -336,7 +339,7 @@ static int part_validate_eraseblock(struct mtdids *id, struct part_info *part)
if (!mtd->numeraseregions) {
/*
* Only one eraseregion (NAND, OneNAND or uniform NOR),
* Only one eraseregion (NAND, SPI-NAND, OneNAND or uniform NOR),
* checking for alignment is easy here
*/
offset = part->offset;
@@ -1027,7 +1030,7 @@ static struct mtdids* id_find_by_mtd_id(const char *mtd_id, unsigned int mtd_id_
}
/**
* Parse device id string <dev-id> := 'nand'|'nor'|'onenand'<dev-num>,
* Parse device id string <dev-id> := 'nand'|'nor'|'onenand'|'spi-nand'<dev-num>,
* return device type and number.
*
* @param id string describing device id
@@ -1051,6 +1054,9 @@ int mtd_id_parse(const char *id, const char **ret_id, u8 *dev_type,
} else if (strncmp(p, "onenand", 7) == 0) {
*dev_type = MTD_DEV_TYPE_ONENAND;
p += 7;
} else if (strncmp(p, "spi-nand", 8) == 0) {
*dev_type = MTD_DEV_TYPE_SPINAND;
p += 8;
} else {
printf("incorrect device type in %s\n", id);
return 1;
@@ -1093,9 +1099,6 @@ static int generate_mtdparts(char *buf, u32 buflen)
return 0;
}
strcpy(p, "mtdparts=");
p += 9;
list_for_each(dentry, &devices) {
dev = list_entry(dentry, struct mtd_device, link);
@@ -1566,11 +1569,9 @@ static int parse_mtdparts(const char *const mtdparts)
if (!p)
p = mtdparts;
if (strncmp(p, "mtdparts=", 9) != 0) {
printf("mtdparts variable doesn't start with 'mtdparts='\n");
return err;
}
p += 9;
/* Skip the useless prefix, if any */
if (strncmp(p, "mtdparts=", 9) == 0)
p += 9;
while (*p != '\0') {
err = 1;
@@ -1633,7 +1634,7 @@ static int parse_mtdids(const char *const ids)
while(p && (*p != '\0')) {
ret = 1;
/* parse 'nor'|'nand'|'onenand'<dev-num> */
/* parse 'nor'|'nand'|'onenand'|'spi-nand'<dev-num> */
if (mtd_id_parse(p, &p, &type, &num) != 0)
break;
@@ -2109,7 +2110,7 @@ static char mtdparts_help_text[] =
"'mtdids' - linux kernel mtd device id <-> u-boot device id mapping\n\n"
"mtdids=<idmap>[,<idmap>,...]\n\n"
"<idmap> := <dev-id>=<mtd-id>\n"
"<dev-id> := 'nand'|'nor'|'onenand'<dev-num>\n"
"<dev-id> := 'nand'|'nor'|'onenand'|'spi-nand'<dev-num>\n"
"<dev-num> := mtd device number, 0...\n"
"<mtd-id> := unique device tag used by linux kernel to find mtd device (mtd->name)\n\n"
"'mtdparts' - partition list\n\n"

cmd/ubi.c

@@ -15,6 +15,7 @@
#include <command.h>
#include <exports.h>
#include <memalign.h>
#include <mtd.h>
#include <nand.h>
#include <onenand_uboot.h>
#include <linux/mtd/mtd.h>
@@ -27,24 +28,8 @@
#undef ubi_msg
#define ubi_msg(fmt, ...) printf("UBI: " fmt "\n", ##__VA_ARGS__)
#define DEV_TYPE_NONE 0
#define DEV_TYPE_NAND 1
#define DEV_TYPE_ONENAND 2
#define DEV_TYPE_NOR 3
/* Private own data */
static struct ubi_device *ubi;
static char buffer[80];
static int ubi_initialized;
struct selected_dev {
char part_name[80];
int selected;
int nr;
struct mtd_info *mtd_info;
};
static struct selected_dev ubi_dev;
#ifdef CONFIG_CMD_UBIFS
#include <ubifs_uboot.h>
@@ -408,43 +393,24 @@ int ubi_volume_read(char *volume, char *buf, size_t size)
return err;
}
static int ubi_dev_scan(struct mtd_info *info, char *ubidev,
const char *vid_header_offset)
static int ubi_dev_scan(struct mtd_info *info, const char *vid_header_offset)
{
struct mtd_device *dev;
struct part_info *part;
struct mtd_partition mtd_part;
char ubi_mtd_param_buffer[80];
u8 pnum;
int err;
if (find_dev_and_part(ubidev, &dev, &pnum, &part) != 0)
return 1;
if (!vid_header_offset)
sprintf(ubi_mtd_param_buffer, "%s", info->name);
else
sprintf(ubi_mtd_param_buffer, "%s,%s", info->name,
vid_header_offset);
sprintf(buffer, "mtd=%d", pnum);
memset(&mtd_part, 0, sizeof(mtd_part));
mtd_part.name = buffer;
mtd_part.size = part->size;
mtd_part.offset = part->offset;
add_mtd_partitions(info, &mtd_part, 1);
strcpy(ubi_mtd_param_buffer, buffer);
if (vid_header_offset)
sprintf(ubi_mtd_param_buffer, "mtd=%d,%s", pnum,
vid_header_offset);
err = ubi_mtd_param_parse(ubi_mtd_param_buffer, NULL);
if (err) {
del_mtd_partitions(info);
if (err)
return -err;
}
err = ubi_init();
if (err) {
del_mtd_partitions(info);
if (err)
return -err;
}
ubi_initialized = 1;
return 0;
}
@@ -469,50 +435,33 @@ int ubi_detach(void)
/*
* Call ubi_exit() before re-initializing the UBI subsystem
*/
if (ubi_initialized) {
if (ubi)
ubi_exit();
del_mtd_partitions(ubi_dev.mtd_info);
ubi_initialized = 0;
}
ubi_dev.selected = 0;
ubi = NULL;
return 0;
}
int ubi_part(char *part_name, const char *vid_header_offset)
{
struct mtd_info *mtd;
int err = 0;
char mtd_dev[16];
struct mtd_device *dev;
struct part_info *part;
u8 pnum;
ubi_detach();
/*
* Search the mtd device number where this partition
* is located
*/
if (find_dev_and_part(part_name, &dev, &pnum, &part)) {
mtd_probe_devices();
mtd = get_mtd_device_nm(part_name);
if (IS_ERR(mtd)) {
printf("Partition %s not found!\n", part_name);
return 1;
}
sprintf(mtd_dev, "%s%d", MTD_DEV_TYPE(dev->id->type), dev->id->num);
ubi_dev.mtd_info = get_mtd_device_nm(mtd_dev);
if (IS_ERR(ubi_dev.mtd_info)) {
printf("Partition %s not found on device %s!\n", part_name,
mtd_dev);
return 1;
}
put_mtd_device(mtd);
ubi_dev.selected = 1;
strcpy(ubi_dev.part_name, part_name);
err = ubi_dev_scan(ubi_dev.mtd_info, ubi_dev.part_name,
vid_header_offset);
err = ubi_dev_scan(mtd, vid_header_offset);
if (err) {
printf("UBI init error %d\n", err);
printf("Please check, if the correct MTD partition is used (size big enough?)\n");
ubi_dev.selected = 0;
return err;
}
@@ -543,13 +492,13 @@ static int do_ubi(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
/* Print current partition */
if (argc == 2) {
if (!ubi_dev.selected) {
printf("Error, no UBI device/partition selected!\n");
if (!ubi) {
printf("Error, no UBI device selected!\n");
return 1;
}
printf("Device %d: %s, partition %s\n",
ubi_dev.nr, ubi_dev.mtd_info->name, ubi_dev.part_name);
printf("Device %d: %s, MTD partition %s\n",
ubi->ubi_num, ubi->ubi_name, ubi->mtd->name);
return 0;
}
@@ -562,8 +511,8 @@ static int do_ubi(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
return ubi_part(argv[2], vid_header_offset);
}
if ((strcmp(argv[1], "part") != 0) && (!ubi_dev.selected)) {
printf("Error, no UBI device/partition selected!\n");
if ((strcmp(argv[1], "part") != 0) && !ubi) {
printf("Error, no UBI device selected!\n");
return 1;
}


@@ -487,7 +487,7 @@ config SPL_NAND_SUPPORT
help
Enable support for NAND (Negative AND) flash in SPL. NAND flash
can be used to allow SPL to load U-Boot from supported devices.
This enables the drivers in drivers/mtd/nand as part of an SPL
This enables the drivers in drivers/mtd/nand/raw as part of an SPL
build.
config SPL_NET_SUPPORT


@@ -2,7 +2,7 @@
/*
* Copyright (C) 2011 OMICRON electronics GmbH
*
* based on drivers/mtd/nand/nand_spl_load.c
* based on drivers/mtd/nand/raw/nand_spl_load.c
*
* Copyright (C) 2011
* Heiko Schocher, DENX Software Engineering, hs@denx.de.


@@ -30,6 +30,7 @@ CONFIG_NET_RANDOM_ETHADDR=y
CONFIG_DM=y
# CONFIG_BLK is not set
CONFIG_DM_MMC=y
CONFIG_DM_SPI_FLASH=y
CONFIG_SPI_FLASH=y
CONFIG_SPI_FLASH_WINBOND=y
CONFIG_FSL_PFE=y
@@ -40,6 +41,8 @@ CONFIG_DM_PCI=y
CONFIG_DM_PCI_COMPAT=y
CONFIG_PCIE_LAYERSCAPE=y
CONFIG_SYS_NS16550=y
CONFIG_SPI=y
CONFIG_DM_SPI=y
CONFIG_USB=y
CONFIG_DM_USB=y
CONFIG_USB_XHCI_HCD=y


@@ -31,6 +31,7 @@ CONFIG_NET_RANDOM_ETHADDR=y
CONFIG_DM=y
# CONFIG_BLK is not set
CONFIG_DM_MMC=y
CONFIG_DM_SPI_FLASH=y
CONFIG_SPI_FLASH=y
CONFIG_SPI_FLASH_WINBOND=y
CONFIG_FSL_PFE=y
@@ -41,6 +42,8 @@ CONFIG_DM_PCI=y
CONFIG_DM_PCI_COMPAT=y
CONFIG_PCIE_LAYERSCAPE=y
CONFIG_SYS_NS16550=y
CONFIG_SPI=y
CONFIG_DM_SPI=y
CONFIG_USB=y
CONFIG_DM_USB=y
CONFIG_USB_XHCI_HCD=y


@@ -57,11 +57,11 @@ CONFIG_SPL_FAT_SUPPORT (fs/fat/libfat.o)
CONFIG_SPL_EXT_SUPPORT
CONFIG_SPL_LIBGENERIC_SUPPORT (lib/libgeneric.o)
CONFIG_SPL_POWER_SUPPORT (drivers/power/libpower.o)
CONFIG_SPL_NAND_SUPPORT (drivers/mtd/nand/libnand.o)
CONFIG_SPL_NAND_SUPPORT (drivers/mtd/nand/raw/libnand.o)
CONFIG_SPL_DRIVERS_MISC_SUPPORT (drivers/misc)
CONFIG_SPL_DMA_SUPPORT (drivers/dma/libdma.o)
CONFIG_SPL_POST_MEM_SUPPORT (post/drivers/memory.o)
CONFIG_SPL_NAND_LOAD (drivers/mtd/nand/nand_spl_load.o)
CONFIG_SPL_NAND_LOAD (drivers/mtd/nand/raw/nand_spl_load.o)
CONFIG_SPL_SPI_LOAD (drivers/mtd/spi/spi_spl_load.o)
CONFIG_SPL_RAM_DEVICE (common/spl/spl.c)
CONFIG_SPL_WATCHDOG_SUPPORT (drivers/watchdog/libwatchdog.o)


@@ -84,7 +84,7 @@ Relocation with SPL (example for the tx25 booting from NAND Flash):
- cpu copies the first page from NAND to 0xbb000000 (IMX_NFC_BASE)
and start with code execution on this address.
- The First page contains u-boot code from drivers/mtd/nand/mxc_nand_spl.c
- The First page contains u-boot code from drivers/mtd/nand/raw/mxc_nand_spl.c
which inits the dram and cpu registers, relocates itself to CONFIG_SPL_TEXT_BASE and loads
the "real" u-boot to CONFIG_SYS_NAND_U_BOOT_DST and starts execution
@CONFIG_SYS_NAND_U_BOOT_START


@@ -116,7 +116,7 @@ Configuration Options:
The maximum number of NAND chips per device to be supported.
CONFIG_SYS_NAND_SELF_INIT
Traditionally, glue code in drivers/mtd/nand/nand.c has driven
Traditionally, glue code in drivers/mtd/nand/raw/nand.c has driven
the initialization process -- it provides the mtd and nand
structs, calls a board init function for a specific device,
calls nand_scan(), and registers with mtd.
@@ -125,7 +125,7 @@ Configuration Options:
run code between nand_scan_ident() and nand_scan_tail(), or other
deviations from the "normal" flow.
If a board defines CONFIG_SYS_NAND_SELF_INIT, drivers/mtd/nand/nand.c
If a board defines CONFIG_SYS_NAND_SELF_INIT, drivers/mtd/nand/raw/nand.c
will make one call to board_nand_init(), with no arguments. That
function is responsible for calling a driver init function for
each NAND device on the board, that performs all initialization
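
A minimal sketch of such a hook, assuming a hypothetical per-board controller init routine (the driver itself allocates the nand/mtd structs, calls nand_scan() and registers the device with MTD):

	#if defined(CONFIG_SYS_NAND_SELF_INIT)
	void board_nand_init(void)
	{
		/* Probe each NAND controller on the board */
		my_nand_controller_init();	/* hypothetical */
	}
	#endif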
@@ -280,7 +280,7 @@ NOTE:
=====
The Disk On Chip driver is currently broken and has been for some time.
There is a driver in drivers/mtd/nand, taken from Linux, that works with
There is a driver in drivers/mtd/nand/raw, taken from Linux, that works with
the current NAND system but has not yet been adapted to the u-boot
environment.


@@ -63,7 +63,7 @@ bootmode strings at runtime.
spi - drivers/spi/zynq_spi.c
qspi - drivers/spi/zynq_qspi.c
i2c - drivers/i2c/zynq_i2c.c
nand - drivers/mtd/nand/zynq_nand.c
nand - drivers/mtd/nand/raw/zynq_nand.c
- Done proper cleanups on board configurations
- Added basic FDT support for zynq boards
- d-cache support for zynq_gem.c


@@ -0,0 +1,5 @@
SPI NAND flash
Required properties:
- compatible: should be "spi-nand"
- reg: should encode the chip-select line used to access the NAND chip
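
Example node (a minimal sketch; it must sit under the board's SPI controller node, whose own properties are board-specific):

	spi-nand@0 {
		compatible = "spi-nand";
		reg = <0>;
	};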


@@ -6,7 +6,7 @@ obj-$(CONFIG_$(SPL_TPL_)DRIVERS_MISC_SUPPORT) += misc/ sysreset/ firmware/
obj-$(CONFIG_$(SPL_TPL_)I2C_SUPPORT) += i2c/
obj-$(CONFIG_$(SPL_TPL_)LED) += led/
obj-$(CONFIG_$(SPL_TPL_)MMC_SUPPORT) += mmc/
obj-$(CONFIG_$(SPL_TPL_)NAND_SUPPORT) += mtd/nand/
obj-$(CONFIG_$(SPL_TPL_)NAND_SUPPORT) += mtd/nand/raw/
obj-$(CONFIG_$(SPL_TPL_)PHY) += phy/
obj-$(CONFIG_$(SPL_TPL_)PINCTRL) += pinctrl/
obj-$(CONFIG_$(SPL_TPL_)RAM) += ram/


@@ -1,5 +1,8 @@
menu "MTD Support"
config MTD_PARTITIONS
bool
config MTD
bool "Enable Driver Model for MTD drivers"
depends on DM
@@ -59,10 +62,10 @@ config RENESAS_RPC_HF
This enables access to Hyperflash memory through the Renesas
RCar Gen3 RPC controller.
endmenu
source "drivers/mtd/nand/Kconfig"
source "drivers/mtd/spi/Kconfig"
source "drivers/mtd/ubi/Kconfig"
endmenu


@@ -3,7 +3,7 @@
# (C) Copyright 2000-2007
# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
ifneq (,$(findstring y,$(CONFIG_MTD_DEVICE)$(CONFIG_CMD_NAND)$(CONFIG_CMD_ONENAND)$(CONFIG_CMD_SF)))
ifneq (,$(findstring y,$(CONFIG_MTD_DEVICE)$(CONFIG_CMD_NAND)$(CONFIG_CMD_ONENAND)$(CONFIG_CMD_SF)$(CONFIG_CMD_MTD)))
obj-y += mtdcore.o mtd_uboot.o
endif
obj-$(CONFIG_MTD) += mtd-uclass.o
@@ -18,3 +18,5 @@ obj-$(CONFIG_FLASH_PIC32) += pic32_flash.o
obj-$(CONFIG_ST_SMI) += st_smi.o
obj-$(CONFIG_STM32_FLASH) += stm32_flash.o
obj-$(CONFIG_RENESAS_RPC_HF) += renesas_rpc_hf.o
obj-y += nand/


@@ -5,9 +5,25 @@
#include <common.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <errno.h>
#include <mtd.h>
/**
* mtd_probe - Probe the device @dev if not already done
*
* @dev: U-Boot device to probe
*
* @return 0 on success, an error otherwise.
*/
int mtd_probe(struct udevice *dev)
{
if (device_active(dev))
return 0;
return device_probe(dev);
}
/*
* Implement an MTD uclass which should include most flash drivers.
* The uclass private data points to an mtd_info instance.


@@ -4,8 +4,230 @@
* Heiko Schocher, DENX Software Engineering, hs@denx.de.
*/
#include <common.h>
#include <dm/device.h>
#include <dm/uclass-internal.h>
#include <jffs2/jffs2.h> /* LEGACY */
#include <linux/mtd/mtd.h>
#include <jffs2/jffs2.h>
#include <linux/mtd/partitions.h>
#include <mtd.h>
#define MTD_NAME_MAX_LEN 20
/**
* mtd_search_alternate_name - Search an alternate name for @mtdname thanks to
* the mtdids legacy environment variable.
*
* The mtdids string is a list of comma-separated 'dev_id=mtd_id' tuples.
* Check if one of the mtd_ids matches mtdname; in that case, save the
* corresponding dev_id in altname.
*
* @mtdname: Current MTD device name
* @altname: Alternate name to return
* @max_len: Length of the alternate name buffer
*
* @return 0 on success, an error otherwise.
*/
int mtd_search_alternate_name(const char *mtdname, char *altname,
unsigned int max_len)
{
const char *mtdids, *equal, *comma, *dev_id, *mtd_id;
int dev_id_len, mtd_id_len;
mtdids = env_get("mtdids");
if (!mtdids)
return -EINVAL;
do {
/* Find the '=' sign */
dev_id = mtdids;
equal = strchr(dev_id, '=');
if (!equal)
break;
dev_id_len = equal - mtdids;
mtd_id = equal + 1;
/* Find the end of the tuple */
comma = strchr(mtdids, ',');
if (comma)
mtd_id_len = comma - mtd_id;
else
mtd_id_len = &mtdids[strlen(mtdids)] - mtd_id + 1;
if (!dev_id_len || !mtd_id_len)
return -EINVAL;
if (dev_id_len + 1 > max_len)
continue;
/* Compare the name we search with the current mtd_id */
if (!strncmp(mtdname, mtd_id, mtd_id_len)) {
strncpy(altname, dev_id, dev_id_len);
altname[dev_id_len] = 0;
return 0;
}
/* Go to the next tuple */
mtdids = comma + 1;
} while (comma);
return -EINVAL;
}
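
For example, reusing the mapping from the mtdparts documentation, with mtdids=nor0=edb7312-nor a call like the following sketch stores "nor0" in altname:

	char altname[MTD_NAME_MAX_LEN];

	if (!mtd_search_alternate_name("edb7312-nor", altname, MTD_NAME_MAX_LEN))
		printf("%s\n", altname);	/* prints "nor0" */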
#if IS_ENABLED(CONFIG_MTD)
static void mtd_probe_uclass_mtd_devs(void)
{
struct udevice *dev;
int idx = 0;
/* Probe devices with DM compliant drivers */
while (!uclass_find_device(UCLASS_MTD, idx, &dev) && dev) {
mtd_probe(dev);
idx++;
}
}
#else
static void mtd_probe_uclass_mtd_devs(void) { }
#endif
#if defined(CONFIG_MTD_PARTITIONS)
int mtd_probe_devices(void)
{
static char *old_mtdparts;
static char *old_mtdids;
const char *mtdparts = env_get("mtdparts");
const char *mtdids = env_get("mtdids");
bool remaining_partitions = true;
struct mtd_info *mtd;
mtd_probe_uclass_mtd_devs();
/* Check if mtdparts/mtdids changed since last call, otherwise: exit */
if (!strcmp(mtdparts, old_mtdparts) && !strcmp(mtdids, old_mtdids))
return 0;
/* Update the local copy of mtdparts */
free(old_mtdparts);
free(old_mtdids);
old_mtdparts = strdup(mtdparts);
old_mtdids = strdup(mtdids);
/* If at least one partition is still in use, do not delete anything */
mtd_for_each_device(mtd) {
if (mtd->usecount) {
printf("Partition \"%s\" already in use, aborting\n",
mtd->name);
return -EACCES;
}
}
/*
* Everything looks clear, remove all partitions. It is not safe to
* remove entries from the mtd_for_each_device loop as it uses idr
* indexes and the partitions removal is done in bulk (all partitions of
* one device at the same time), so break and iterate from start each
* time a new partition is found and deleted.
*/
while (remaining_partitions) {
remaining_partitions = false;
mtd_for_each_device(mtd) {
if (!mtd_is_partition(mtd) && mtd_has_partitions(mtd)) {
del_mtd_partitions(mtd);
remaining_partitions = true;
break;
}
}
}
/* Start the parsing by ignoring the extra 'mtdparts=' prefix, if any */
if (strstr(mtdparts, "mtdparts="))
mtdparts += 9;
/* For each MTD device in mtdparts */
while (mtdparts[0] != '\0') {
char mtd_name[MTD_NAME_MAX_LEN], *colon;
struct mtd_partition *parts;
int mtd_name_len, nparts;
int ret;
colon = strchr(mtdparts, ':');
if (!colon) {
printf("Wrong mtdparts: %s\n", mtdparts);
return -EINVAL;
}
mtd_name_len = colon - mtdparts;
strncpy(mtd_name, mtdparts, mtd_name_len);
mtd_name[mtd_name_len] = '\0';
/* Move the pointer forward (including the ':') */
mtdparts += mtd_name_len + 1;
mtd = get_mtd_device_nm(mtd_name);
if (IS_ERR_OR_NULL(mtd)) {
char linux_name[MTD_NAME_MAX_LEN];
/*
* The MTD device named "mtd_name" does not exist. Try
* to find a correspondence with an MTD device having
* the same type and number as defined in the mtdids.
*/
debug("No device named %s\n", mtd_name);
ret = mtd_search_alternate_name(mtd_name, linux_name,
MTD_NAME_MAX_LEN);
if (!ret)
mtd = get_mtd_device_nm(linux_name);
/*
* If no device could be found, move the mtdparts
* pointer forward until the next set of partitions.
*/
if (ret || IS_ERR_OR_NULL(mtd)) {
printf("Could not find a valid device for %s\n",
mtd_name);
mtdparts = strchr(mtdparts, ';');
if (mtdparts)
mtdparts++;
continue;
}
}
/*
* Parse the MTD device partitions. It will update the mtdparts
* pointer, create an array of parts (that must be freed), and
* return the number of partition structures in the array.
*/
ret = mtd_parse_partitions(mtd, &mtdparts, &parts, &nparts);
if (ret) {
printf("Could not parse device %s\n", mtd->name);
put_mtd_device(mtd);
return -EINVAL;
}
if (!nparts)
continue;
/* Create the new MTD partitions */
add_mtd_partitions(mtd, parts, nparts);
/* Free the structures allocated during the parsing */
mtd_free_parsed_partitions(parts, nparts);
put_mtd_device(mtd);
}
return 0;
}
#else
int mtd_probe_devices(void)
{
mtd_probe_uclass_mtd_devs();
return 0;
}
#endif /* defined(CONFIG_MTD_PARTITIONS) */
/* Legacy */
static int get_part(const char *partname, int *idx, loff_t *off, loff_t *size,
loff_t *maxsize, int devtype)


@@ -426,6 +426,8 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->index = i;
mtd->usecount = 0;
INIT_LIST_HEAD(&mtd->partitions);
/* default value if not set by driver */
if (mtd->bitflip_threshold == 0)
mtd->bitflip_threshold = mtd->ecc_strength;
@@ -937,7 +939,20 @@ int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
* representing the maximum number of bitflips that were corrected on
* any one ecc region (if applicable; zero otherwise).
*/
ret_code = mtd->_read(mtd, from, len, retlen, buf);
if (mtd->_read) {
ret_code = mtd->_read(mtd, from, len, retlen, buf);
} else if (mtd->_read_oob) {
struct mtd_oob_ops ops = {
.len = len,
.datbuf = buf,
};
ret_code = mtd->_read_oob(mtd, from, &ops);
*retlen = ops.retlen;
} else {
return -ENOTSUPP;
}
if (unlikely(ret_code < 0))
return ret_code;
if (mtd->ecc_strength == 0)
@@ -952,10 +967,24 @@ int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
*retlen = 0;
if (to < 0 || to > mtd->size || len > mtd->size - to)
return -EINVAL;
if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
if ((!mtd->_write && !mtd->_write_oob) ||
!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
if (!len)
return 0;
if (!mtd->_write) {
struct mtd_oob_ops ops = {
.len = len,
.datbuf = (u8 *)buf,
};
int ret;
ret = mtd->_write_oob(mtd, to, &ops);
*retlen = ops.retlen;
return ret;
}
return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);
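
These fallbacks let a driver populate only the page-oriented _read_oob()/_write_oob() hooks, as a SPI-NAND style driver would, and still service plain mtd_read()/mtd_write() calls: the request is simply wrapped in a struct mtd_oob_ops carrying a data buffer and no OOB buffer.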
@@ -983,19 +1012,64 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
struct mtd_oob_ops *ops)
{
/*
* Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
* ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
* this case.
*/
if (!ops->datbuf)
ops->len = 0;
if (!ops->oobbuf)
ops->ooblen = 0;
if (offs < 0 || offs + ops->len > mtd->size)
return -EINVAL;
if (ops->ooblen) {
u64 maxooblen;
if (ops->ooboffs >= mtd_oobavail(mtd, ops))
return -EINVAL;
maxooblen = ((mtd_div_by_ws(mtd->size, mtd) -
mtd_div_by_ws(offs, mtd)) *
mtd_oobavail(mtd, ops)) - ops->ooboffs;
if (ops->ooblen > maxooblen)
return -EINVAL;
}
return 0;
}
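
As a worked example with a hypothetical geometry: for a 256KiB device with 2KiB pages and 64 bytes of available OOB per page, an access starting 120 pages in leaves 8 pages, so maxooblen = 8 * 64 - ops->ooboffs and any larger ooblen is rejected with -EINVAL.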
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
int ret_code;
ops->retlen = ops->oobretlen = 0;
if (!mtd->_read_oob)
ret_code = mtd_check_oob_ops(mtd, from, ops);
if (ret_code)
return ret_code;
/* Check the validity of a potential fallback on mtd->_read */
if (!mtd->_read_oob && (!mtd->_read || ops->oobbuf))
return -EOPNOTSUPP;
if (mtd->_read_oob)
ret_code = mtd->_read_oob(mtd, from, ops);
else
ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen,
ops->datbuf);
/*
* In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
* similar to mtd->_read(), returning a non-negative integer
* representing max bitflips. In other cases, mtd->_read_oob() may
* return -EUCLEAN. In all cases, perform similar logic to mtd_read().
*/
ret_code = mtd->_read_oob(mtd, from, ops);
if (unlikely(ret_code < 0))
return ret_code;
if (mtd->ecc_strength == 0)
@@ -1004,6 +1078,32 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
int mtd_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
int ret;
ops->retlen = ops->oobretlen = 0;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
ret = mtd_check_oob_ops(mtd, to, ops);
if (ret)
return ret;
/* Check the validity of a potential fallback on mtd->_write */
if (!mtd->_write_oob && (!mtd->_write || ops->oobbuf))
return -EOPNOTSUPP;
if (mtd->_write_oob)
return mtd->_write_oob(mtd, to, ops);
else
return mtd->_write(mtd, to, ops->len, &ops->retlen,
ops->datbuf);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);
/**
* mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
* @mtd: MTD device structure


@@ -5,7 +5,6 @@
extern struct mutex mtd_table_mutex;
struct mtd_info *__mtd_next_device(int i);
int add_mtd_device(struct mtd_info *mtd);
int del_mtd_device(struct mtd_info *mtd);
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
@@ -16,8 +15,3 @@ int parse_mtd_partitions(struct mtd_info *master, const char * const *types,
int __init init_mtdchar(void);
void __exit cleanup_mtdchar(void);
#define mtd_for_each_device(mtd) \
for ((mtd) = __mtd_next_device(0); \
(mtd) != NULL; \
(mtd) = __mtd_next_device(mtd->index + 1))


@@ -26,32 +26,16 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include "mtdcore.h"
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
#ifndef __UBOOT__
static DEFINE_MUTEX(mtd_partitions_mutex);
#else
DEFINE_MUTEX(mtd_partitions_mutex);
#endif
/* Our partition node structure */
struct mtd_part {
struct mtd_info mtd;
struct mtd_info *master;
uint64_t offset;
struct list_head list;
};
/*
* Given a pointer to the MTD object in the mtd_part structure, we can retrieve
* the pointer to that structure with this macro.
*/
#define PART(x) ((struct mtd_part *)(x))
#ifdef __UBOOT__
/* from mm/util.c */
@@ -76,6 +60,215 @@ char *kstrdup(const char *s, gfp_t gfp)
}
#endif
#define MTD_SIZE_REMAINING (~0LLU)
#define MTD_OFFSET_NOT_SPECIFIED (~0LLU)
/**
* mtd_parse_partition - Parse @mtdparts partition definition, fill @partition
* with it and update the @mtdparts string pointer.
*
* The partition name is allocated and must be freed by the caller.
*
* This function is largely inspired by part_parse() in mtdparts.c.
*
* @mtdparts: String describing the partition with mtdparts command syntax
* @partition: MTD partition structure to fill
*
* @return 0 on success, an error otherwise.
*/
static int mtd_parse_partition(const char **_mtdparts,
struct mtd_partition *partition)
{
const char *mtdparts = *_mtdparts;
const char *name = NULL;
int name_len;
char *buf;
/* Ensure the partition structure is empty */
memset(partition, 0, sizeof(struct mtd_partition));
/* Fetch the partition size */
if (*mtdparts == '-') {
/* Assign all remaining space to this partition */
partition->size = MTD_SIZE_REMAINING;
mtdparts++;
} else {
partition->size = ustrtoull(mtdparts, (char **)&mtdparts, 0);
if (partition->size < SZ_4K) {
printf("Minimum partition size 4kiB, %lldB requested\n",
partition->size);
return -EINVAL;
}
}
/* Check for the offset */
partition->offset = MTD_OFFSET_NOT_SPECIFIED;
if (*mtdparts == '@') {
mtdparts++;
partition->offset = ustrtoull(mtdparts, (char **)&mtdparts, 0);
}
/* Now look for the name */
if (*mtdparts == '(') {
name = ++mtdparts;
mtdparts = strchr(name, ')');
if (!mtdparts) {
printf("No closing ')' found in partition name\n");
return -EINVAL;
}
name_len = mtdparts - name + 1;
if ((name_len - 1) == 0) {
printf("Empty partition name\n");
return -EINVAL;
}
mtdparts++;
} else {
/* Name will be of the form size@offset */
name_len = 22;
}
/* Check if the partition is read-only */
if (strncmp(mtdparts, "ro", 2) == 0) {
partition->mask_flags |= MTD_WRITEABLE;
mtdparts += 2;
}
/* Check for a potential next partition definition */
if (*mtdparts == ',') {
if (partition->size == MTD_SIZE_REMAINING) {
printf("No partitions allowed after a fill-up\n");
return -EINVAL;
}
++mtdparts;
} else if ((*mtdparts == ';') || (*mtdparts == '\0')) {
/* NOP */
} else {
printf("Unexpected character '%c' in mtdparts\n", *mtdparts);
return -EINVAL;
}
/*
* Allocate a buffer for the name and either copy the provided name or
* auto-generate it with the form 'size@offset'.
*/
buf = malloc(name_len);
if (!buf)
return -ENOMEM;
if (name)
strncpy(buf, name, name_len - 1);
else
snprintf(buf, name_len, "0x%08llx@0x%08llx",
partition->size, partition->offset);
buf[name_len - 1] = '\0';
partition->name = buf;
*_mtdparts = mtdparts;
return 0;
}
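
For instance, a hypothetical definition "512k(u-boot)ro," parses to size = 0x80000, offset = MTD_OFFSET_NOT_SPECIFIED (resolved later from the running offset), name = "u-boot" and MTD_WRITEABLE set in mask_flags, with the pointer left on the next partition definition.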
/**
* mtd_parse_partitions - Create a partition array from an mtdparts definition
*
* Stateless function that takes a @parent MTD device, a string @_mtdparts
* describing the partitions (with the "mtdparts" command syntax) and creates
* the corresponding MTD partition structure array @_parts. Both the name and
the partition structure itself must be freed; the caller may use
* @mtd_free_parsed_partitions() for this purpose.
*
* @parent: MTD device which contains the partitions
* @_mtdparts: Pointer to a string describing the partitions with "mtdparts"
* command syntax.
* @_parts: Allocated array containing the partitions, must be freed by the
* caller.
* @_nparts: Size of @_parts array.
*
* @return 0 on success, an error otherwise.
*/
int mtd_parse_partitions(struct mtd_info *parent, const char **_mtdparts,
struct mtd_partition **_parts, int *_nparts)
{
struct mtd_partition partition = {}, *parts;
const char *mtdparts = *_mtdparts;
int cur_off = 0, cur_sz = 0;
int nparts = 0;
int ret, idx;
u64 sz;
/* First, iterate over the partitions until we know their number */
while (mtdparts[0] != '\0' && mtdparts[0] != ';') {
ret = mtd_parse_partition(&mtdparts, &partition);
if (ret)
return ret;
free((char *)partition.name);
nparts++;
}
/* Allocate an array of partitions to give back to the caller */
parts = malloc(sizeof(*parts) * nparts);
if (!parts) {
printf("Not enough space to save partitions meta-data\n");
return -ENOMEM;
}
/* Iterate again over each partition to save the data in our array */
for (idx = 0; idx < nparts; idx++) {
ret = mtd_parse_partition(_mtdparts, &parts[idx]);
if (ret)
return ret;
if (parts[idx].size == MTD_SIZE_REMAINING)
parts[idx].size = parent->size - cur_sz;
cur_sz += parts[idx].size;
sz = parts[idx].size;
if (sz < parent->writesize || do_div(sz, parent->writesize)) {
printf("Partition size must be a multiple of %d\n",
parent->writesize);
return -EINVAL;
}
if (parts[idx].offset == MTD_OFFSET_NOT_SPECIFIED)
parts[idx].offset = cur_off;
cur_off += parts[idx].size;
parts[idx].ecclayout = parent->ecclayout;
}
/* Offset by one mtdparts to point to the next device if any */
if (*_mtdparts[0] == ';')
(*_mtdparts)++;
*_parts = parts;
*_nparts = nparts;
return 0;
}
/**
* mtd_free_parsed_partitions - Free dynamically allocated partitions
*
* Each successful call to @mtd_parse_partitions must be followed by a call to
* @mtd_free_parsed_partitions to free any allocated array during the parsing
* process.
*
* @parts: Array containing the partitions that will be freed.
* @nparts: Size of @parts array.
*/
void mtd_free_parsed_partitions(struct mtd_partition *parts,
unsigned int nparts)
{
int i;
for (i = 0; i < nparts; i++)
free((char *)parts[i].name);
free(parts);
}
/*
* MTD methods which simply translate the effective address and pass through
* to the _real_ device.
@@ -84,19 +277,18 @@ char *kstrdup(const char *s, gfp_t gfp)
static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
struct mtd_ecc_stats stats;
int res;
stats = part->master->ecc_stats;
res = part->master->_read(part->master, from + part->offset, len,
retlen, buf);
stats = mtd->parent->ecc_stats;
res = mtd->parent->_read(mtd->parent, from + mtd->offset, len,
retlen, buf);
if (unlikely(mtd_is_eccerr(res)))
mtd->ecc_stats.failed +=
part->master->ecc_stats.failed - stats.failed;
mtd->parent->ecc_stats.failed - stats.failed;
else
mtd->ecc_stats.corrected +=
part->master->ecc_stats.corrected - stats.corrected;
mtd->parent->ecc_stats.corrected - stats.corrected;
return res;
}
@@ -104,17 +296,13 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct mtd_part *part = PART(mtd);
return part->master->_point(part->master, from + part->offset, len,
retlen, virt, phys);
return mtd->parent->_point(mtd->parent, from + mtd->offset, len,
retlen, virt, phys);
}
static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
struct mtd_part *part = PART(mtd);
return part->master->_unpoint(part->master, from + part->offset, len);
return mtd->parent->_unpoint(mtd->parent, from + mtd->offset, len);
}
#endif
@@ -123,17 +311,13 @@ static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
unsigned long offset,
unsigned long flags)
{
struct mtd_part *part = PART(mtd);
offset += part->offset;
return part->master->_get_unmapped_area(part->master, len, offset,
flags);
offset += mtd->offset;
return mtd->parent->_get_unmapped_area(mtd->parent, len, offset, flags);
}
static int part_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct mtd_part *part = PART(mtd);
int res;
if (from >= mtd->size)
@@ -158,7 +342,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
res = part->master->_read_oob(part->master, from + part->offset, ops);
res = mtd->parent->_read_oob(mtd->parent, from + mtd->offset, ops);
if (unlikely(res)) {
if (mtd_is_bitflip(res))
mtd->ecc_stats.corrected++;
@@ -171,99 +355,87 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
return part->master->_read_user_prot_reg(part->master, from, len,
retlen, buf);
return mtd->parent->_read_user_prot_reg(mtd->parent, from, len,
retlen, buf);
}
static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
size_t *retlen, struct otp_info *buf)
{
struct mtd_part *part = PART(mtd);
return part->master->_get_user_prot_info(part->master, len, retlen,
buf);
return mtd->parent->_get_user_prot_info(mtd->parent, len, retlen,
buf);
}
static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
return part->master->_read_fact_prot_reg(part->master, from, len,
retlen, buf);
return mtd->parent->_read_fact_prot_reg(mtd->parent, from, len,
retlen, buf);
}
static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
size_t *retlen, struct otp_info *buf)
{
struct mtd_part *part = PART(mtd);
return part->master->_get_fact_prot_info(part->master, len, retlen,
buf);
return mtd->parent->_get_fact_prot_info(mtd->parent, len, retlen,
buf);
}
static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct mtd_part *part = PART(mtd);
return part->master->_write(part->master, to + part->offset, len,
retlen, buf);
return mtd->parent->_write(mtd->parent, to + mtd->offset, len,
retlen, buf);
}
static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct mtd_part *part = PART(mtd);
return part->master->_panic_write(part->master, to + part->offset, len,
retlen, buf);
return mtd->parent->_panic_write(mtd->parent, to + mtd->offset, len,
retlen, buf);
}
static int part_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
struct mtd_part *part = PART(mtd);
if (to >= mtd->size)
return -EINVAL;
if (ops->datbuf && to + ops->len > mtd->size)
return -EINVAL;
return part->master->_write_oob(part->master, to + part->offset, ops);
return mtd->parent->_write_oob(mtd->parent, to + mtd->offset, ops);
}
static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
return part->master->_write_user_prot_reg(part->master, from, len,
retlen, buf);
return mtd->parent->_write_user_prot_reg(mtd->parent, from, len,
retlen, buf);
}
static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len)
{
struct mtd_part *part = PART(mtd);
return part->master->_lock_user_prot_reg(part->master, from, len);
return mtd->parent->_lock_user_prot_reg(mtd->parent, from, len);
}
#ifndef __UBOOT__
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
struct mtd_part *part = PART(mtd);
return part->master->_writev(part->master, vecs, count,
to + part->offset, retlen);
return mtd->parent->_writev(mtd->parent, vecs, count,
to + mtd->offset, retlen);
}
#endif
static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct mtd_part *part = PART(mtd);
int ret;
instr->addr += part->offset;
ret = part->master->_erase(part->master, instr);
instr->addr += mtd->offset;
ret = mtd->parent->_erase(mtd->parent, instr);
if (ret) {
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
instr->addr -= part->offset;
instr->fail_addr -= mtd->offset;
instr->addr -= mtd->offset;
}
return ret;
}
@@ -271,11 +443,9 @@ static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
void mtd_erase_callback(struct erase_info *instr)
{
if (instr->mtd->_erase == part_erase) {
struct mtd_part *part = PART(instr->mtd);
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
instr->addr -= part->offset;
instr->fail_addr -= instr->mtd->offset;
instr->addr -= instr->mtd->offset;
}
if (instr->callback)
instr->callback(instr);
@@ -284,105 +454,112 @@ EXPORT_SYMBOL_GPL(mtd_erase_callback);
static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
return part->master->_lock(part->master, ofs + part->offset, len);
return mtd->parent->_lock(mtd->parent, ofs + mtd->offset, len);
}
static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
return part->master->_unlock(part->master, ofs + part->offset, len);
return mtd->parent->_unlock(mtd->parent, ofs + mtd->offset, len);
}
static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct mtd_part *part = PART(mtd);
return part->master->_is_locked(part->master, ofs + part->offset, len);
return mtd->parent->_is_locked(mtd->parent, ofs + mtd->offset, len);
}
static void part_sync(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
part->master->_sync(part->master);
mtd->parent->_sync(mtd->parent);
}
#ifndef __UBOOT__
static int part_suspend(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
return part->master->_suspend(part->master);
return mtd->parent->_suspend(mtd->parent);
}
static void part_resume(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
part->master->_resume(part->master);
mtd->parent->_resume(mtd->parent);
}
#endif
static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_part *part = PART(mtd);
ofs += part->offset;
return part->master->_block_isreserved(part->master, ofs);
ofs += mtd->offset;
return mtd->parent->_block_isreserved(mtd->parent, ofs);
}
static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_part *part = PART(mtd);
ofs += part->offset;
return part->master->_block_isbad(part->master, ofs);
ofs += mtd->offset;
return mtd->parent->_block_isbad(mtd->parent, ofs);
}
static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_part *part = PART(mtd);
int res;
ofs += part->offset;
res = part->master->_block_markbad(part->master, ofs);
ofs += mtd->offset;
res = mtd->parent->_block_markbad(mtd->parent, ofs);
if (!res)
mtd->ecc_stats.badblocks++;
return res;
}
static inline void free_partition(struct mtd_part *p)
static inline void free_partition(struct mtd_info *p)
{
kfree(p->mtd.name);
kfree(p->name);
kfree(p);
}
/*
* This function unregisters and destroys all slave MTD objects which are
* attached to the given master MTD object.
* attached to the given master MTD object, recursively.
*/
int del_mtd_partitions(struct mtd_info *master)
static int do_del_mtd_partitions(struct mtd_info *master)
{
struct mtd_part *slave, *next;
struct mtd_info *slave, *next;
int ret, err = 0;
mutex_lock(&mtd_partitions_mutex);
list_for_each_entry_safe(slave, next, &mtd_partitions, list)
if (slave->master == master) {
ret = del_mtd_device(&slave->mtd);
if (ret < 0) {
err = ret;
continue;
}
list_del(&slave->list);
free_partition(slave);
list_for_each_entry_safe(slave, next, &master->partitions, node) {
if (mtd_has_partitions(slave))
del_mtd_partitions(slave);
debug("Deleting %s MTD partition\n", slave->name);
ret = del_mtd_device(slave);
if (ret < 0) {
printf("Error when deleting partition \"%s\" (%d)\n",
slave->name, ret);
err = ret;
continue;
}
mutex_unlock(&mtd_partitions_mutex);
list_del(&slave->node);
free_partition(slave);
}
return err;
}
static struct mtd_part *allocate_partition(struct mtd_info *master,
const struct mtd_partition *part, int partno,
uint64_t cur_offset)
int del_mtd_partitions(struct mtd_info *master)
{
struct mtd_part *slave;
int ret;
debug("Deleting MTD partitions on \"%s\":\n", master->name);
mutex_lock(&mtd_partitions_mutex);
ret = do_del_mtd_partitions(master);
mutex_unlock(&mtd_partitions_mutex);
return ret;
}
static struct mtd_info *allocate_partition(struct mtd_info *master,
const struct mtd_partition *part,
int partno, uint64_t cur_offset)
{
struct mtd_info *slave;
char *name;
/* allocate the partition structure */
@@ -397,83 +574,87 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
}
/* set up the MTD object for this partition */
slave->mtd.type = master->type;
slave->mtd.flags = master->flags & ~part->mask_flags;
slave->mtd.size = part->size;
slave->mtd.writesize = master->writesize;
slave->mtd.writebufsize = master->writebufsize;
slave->mtd.oobsize = master->oobsize;
slave->mtd.oobavail = master->oobavail;
slave->mtd.subpage_sft = master->subpage_sft;
slave->type = master->type;
slave->flags = master->flags & ~part->mask_flags;
slave->size = part->size;
slave->writesize = master->writesize;
slave->writebufsize = master->writebufsize;
slave->oobsize = master->oobsize;
slave->oobavail = master->oobavail;
slave->subpage_sft = master->subpage_sft;
slave->mtd.name = name;
slave->mtd.owner = master->owner;
slave->name = name;
slave->owner = master->owner;
#ifndef __UBOOT__
slave->mtd.backing_dev_info = master->backing_dev_info;
slave->backing_dev_info = master->backing_dev_info;
/* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
* to have the same data be in two different partitions.
*/
slave->mtd.dev.parent = master->dev.parent;
slave->dev.parent = master->dev.parent;
#endif
slave->mtd._read = part_read;
slave->mtd._write = part_write;
if (master->_read)
slave->_read = part_read;
if (master->_write)
slave->_write = part_write;
if (master->_panic_write)
slave->mtd._panic_write = part_panic_write;
slave->_panic_write = part_panic_write;
#ifndef __UBOOT__
if (master->_point && master->_unpoint) {
slave->mtd._point = part_point;
slave->mtd._unpoint = part_unpoint;
slave->_point = part_point;
slave->_unpoint = part_unpoint;
}
#endif
if (master->_get_unmapped_area)
slave->mtd._get_unmapped_area = part_get_unmapped_area;
slave->_get_unmapped_area = part_get_unmapped_area;
if (master->_read_oob)
slave->mtd._read_oob = part_read_oob;
slave->_read_oob = part_read_oob;
if (master->_write_oob)
slave->mtd._write_oob = part_write_oob;
slave->_write_oob = part_write_oob;
if (master->_read_user_prot_reg)
slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
slave->_read_user_prot_reg = part_read_user_prot_reg;
if (master->_read_fact_prot_reg)
slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
slave->_read_fact_prot_reg = part_read_fact_prot_reg;
if (master->_write_user_prot_reg)
slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
slave->_write_user_prot_reg = part_write_user_prot_reg;
if (master->_lock_user_prot_reg)
slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
slave->_lock_user_prot_reg = part_lock_user_prot_reg;
if (master->_get_user_prot_info)
slave->mtd._get_user_prot_info = part_get_user_prot_info;
slave->_get_user_prot_info = part_get_user_prot_info;
if (master->_get_fact_prot_info)
slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
slave->_get_fact_prot_info = part_get_fact_prot_info;
if (master->_sync)
slave->mtd._sync = part_sync;
slave->_sync = part_sync;
#ifndef __UBOOT__
if (!partno && !master->dev.class && master->_suspend &&
master->_resume) {
slave->mtd._suspend = part_suspend;
slave->mtd._resume = part_resume;
slave->_suspend = part_suspend;
slave->_resume = part_resume;
}
if (master->_writev)
slave->mtd._writev = part_writev;
slave->_writev = part_writev;
#endif
if (master->_lock)
slave->mtd._lock = part_lock;
slave->_lock = part_lock;
if (master->_unlock)
slave->mtd._unlock = part_unlock;
slave->_unlock = part_unlock;
if (master->_is_locked)
slave->mtd._is_locked = part_is_locked;
slave->_is_locked = part_is_locked;
if (master->_block_isreserved)
slave->mtd._block_isreserved = part_block_isreserved;
slave->_block_isreserved = part_block_isreserved;
if (master->_block_isbad)
slave->mtd._block_isbad = part_block_isbad;
slave->_block_isbad = part_block_isbad;
if (master->_block_markbad)
slave->mtd._block_markbad = part_block_markbad;
slave->mtd._erase = part_erase;
slave->master = master;
slave->_block_markbad = part_block_markbad;
slave->_erase = part_erase;
slave->parent = master;
slave->offset = part->offset;
INIT_LIST_HEAD(&slave->partitions);
INIT_LIST_HEAD(&slave->node);
if (slave->offset == MTDPART_OFS_APPEND)
slave->offset = cur_offset;
@ -489,41 +670,41 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
}
if (slave->offset == MTDPART_OFS_RETAIN) {
slave->offset = cur_offset;
if (master->size - slave->offset >= slave->mtd.size) {
slave->mtd.size = master->size - slave->offset
- slave->mtd.size;
if (master->size - slave->offset >= slave->size) {
slave->size = master->size - slave->offset
- slave->size;
} else {
debug("mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
part->name, master->size - slave->offset,
slave->mtd.size);
slave->size);
/* register to preserve ordering */
goto out_register;
}
}
if (slave->mtd.size == MTDPART_SIZ_FULL)
slave->mtd.size = master->size - slave->offset;
if (slave->size == MTDPART_SIZ_FULL)
slave->size = master->size - slave->offset;
debug("0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
(unsigned long long)(slave->offset + slave->size), slave->name);
/* let's do some sanity checks */
if (slave->offset >= master->size) {
/* let's register it anyway to preserve ordering */
slave->offset = 0;
slave->mtd.size = 0;
slave->size = 0;
printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
part->name);
goto out_register;
}
if (slave->offset + slave->mtd.size > master->size) {
slave->mtd.size = master->size - slave->offset;
if (slave->offset + slave->size > master->size) {
slave->size = master->size - slave->offset;
printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
part->name, master->name, (unsigned long long)slave->mtd.size);
part->name, master->name, slave->size);
}
if (master->numeraseregions > 1) {
/* Deal with variable erase size stuff */
int i, max = master->numeraseregions;
u64 end = slave->offset + slave->mtd.size;
u64 end = slave->offset + slave->size;
struct mtd_erase_region_info *regions = master->eraseregions;
/* Find the first erase regions which is part of this
@ -536,44 +717,43 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
/* Pick biggest erasesize */
for (; i < max && regions[i].offset < end; i++) {
if (slave->mtd.erasesize < regions[i].erasesize) {
slave->mtd.erasesize = regions[i].erasesize;
}
if (slave->erasesize < regions[i].erasesize)
slave->erasesize = regions[i].erasesize;
}
BUG_ON(slave->mtd.erasesize == 0);
WARN_ON(slave->erasesize == 0);
} else {
/* Single erase size */
slave->mtd.erasesize = master->erasesize;
slave->erasesize = master->erasesize;
}
if ((slave->mtd.flags & MTD_WRITEABLE) &&
mtd_mod_by_eb(slave->offset, &slave->mtd)) {
if ((slave->flags & MTD_WRITEABLE) &&
mtd_mod_by_eb(slave->offset, slave)) {
/* Doesn't start on a boundary of major erase size */
/* FIXME: Let it be writable if it is on a boundary of
* _minor_ erase size though */
slave->mtd.flags &= ~MTD_WRITEABLE;
slave->flags &= ~MTD_WRITEABLE;
printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
part->name);
}
if ((slave->mtd.flags & MTD_WRITEABLE) &&
mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
slave->mtd.flags &= ~MTD_WRITEABLE;
if ((slave->flags & MTD_WRITEABLE) &&
mtd_mod_by_eb(slave->size, slave)) {
slave->flags &= ~MTD_WRITEABLE;
printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
part->name);
}
slave->mtd.ecclayout = master->ecclayout;
slave->mtd.ecc_step_size = master->ecc_step_size;
slave->mtd.ecc_strength = master->ecc_strength;
slave->mtd.bitflip_threshold = master->bitflip_threshold;
slave->ecclayout = master->ecclayout;
slave->ecc_step_size = master->ecc_step_size;
slave->ecc_strength = master->ecc_strength;
slave->bitflip_threshold = master->bitflip_threshold;
if (master->_block_isbad) {
uint64_t offs = 0;
while (offs < slave->mtd.size) {
while (offs < slave->size) {
if (mtd_block_isbad(master, offs + slave->offset))
slave->mtd.ecc_stats.badblocks++;
offs += slave->mtd.erasesize;
slave->ecc_stats.badblocks++;
offs += slave->erasesize;
}
}
@ -586,7 +766,7 @@ int mtd_add_partition(struct mtd_info *master, const char *name,
long long offset, long long length)
{
struct mtd_partition part;
struct mtd_part *p, *new;
struct mtd_info *p, *new;
uint64_t start, end;
int ret = 0;
@ -615,21 +795,20 @@ int mtd_add_partition(struct mtd_info *master, const char *name,
end = offset + length;
mutex_lock(&mtd_partitions_mutex);
list_for_each_entry(p, &mtd_partitions, list)
if (p->master == master) {
if ((start >= p->offset) &&
(start < (p->offset + p->mtd.size)))
goto err_inv;
list_for_each_entry(p, &master->partitions, node) {
if (start >= p->offset &&
(start < (p->offset + p->size)))
goto err_inv;
if ((end >= p->offset) &&
(end < (p->offset + p->mtd.size)))
goto err_inv;
}
if (end >= p->offset &&
(end < (p->offset + p->size)))
goto err_inv;
}
list_add(&new->list, &mtd_partitions);
list_add_tail(&new->node, &master->partitions);
mutex_unlock(&mtd_partitions_mutex);
add_mtd_device(&new->mtd);
add_mtd_device(new);
return ret;
err_inv:
@ -641,18 +820,17 @@ EXPORT_SYMBOL_GPL(mtd_add_partition);
int mtd_del_partition(struct mtd_info *master, int partno)
{
struct mtd_part *slave, *next;
struct mtd_info *slave, *next;
int ret = -EINVAL;
mutex_lock(&mtd_partitions_mutex);
list_for_each_entry_safe(slave, next, &mtd_partitions, list)
if ((slave->master == master) &&
(slave->mtd.index == partno)) {
ret = del_mtd_device(&slave->mtd);
list_for_each_entry_safe(slave, next, &master->partitions, node)
if (slave->index == partno) {
ret = del_mtd_device(slave);
if (ret < 0)
break;
list_del(&slave->list);
list_del(&slave->node);
free_partition(slave);
break;
}
@ -676,20 +854,10 @@ int add_mtd_partitions(struct mtd_info *master,
const struct mtd_partition *parts,
int nbparts)
{
struct mtd_part *slave;
struct mtd_info *slave;
uint64_t cur_offset = 0;
int i;
#ifdef __UBOOT__
/*
* Need to init the list here, since LIST_INIT() does not
* work on platforms where relocation has problems (like MIPS
* & PPC).
*/
if (mtd_partitions.next == NULL)
INIT_LIST_HEAD(&mtd_partitions);
#endif
debug("Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
for (i = 0; i < nbparts; i++) {
@ -698,12 +866,12 @@ int add_mtd_partitions(struct mtd_info *master,
return PTR_ERR(slave);
mutex_lock(&mtd_partitions_mutex);
list_add(&slave->list, &mtd_partitions);
list_add_tail(&slave->node, &master->partitions);
mutex_unlock(&mtd_partitions_mutex);
add_mtd_device(&slave->mtd);
add_mtd_device(slave);
cur_offset = slave->offset + slave->mtd.size;
cur_offset = slave->offset + slave->size;
}
return 0;
@ -806,29 +974,12 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
}
#endif
int mtd_is_partition(const struct mtd_info *mtd)
{
struct mtd_part *part;
int ispart = 0;
mutex_lock(&mtd_partitions_mutex);
list_for_each_entry(part, &mtd_partitions, list)
if (&part->mtd == mtd) {
ispart = 1;
break;
}
mutex_unlock(&mtd_partitions_mutex);
return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);
/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
if (!mtd_is_partition(mtd))
return mtd->size;
if (mtd_is_partition(mtd))
return mtd->parent->size;
return PART(mtd)->master->size;
return mtd->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
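With the master pointer gone, a partition only records its direct parent, and with nested partitions that parent may itself be a partition. A minimal sketch of walking back to the chip-level device, assuming the root device's parent pointer is NULL:
/* Illustrative helper, not part of this series. */
static struct mtd_info *mtd_get_root(struct mtd_info *mtd)
{
	while (mtd->parent)
		mtd = mtd->parent;

	return mtd;
}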

View file

@ -1,297 +1,6 @@
config MTD_NAND_CORE
tristate
menuconfig NAND
bool "NAND Device Support"
if NAND
source "drivers/mtd/nand/raw/Kconfig"
config SYS_NAND_SELF_INIT
bool
help
This option, if enabled, provides more flexible and linux-like
NAND initialization process.
config NAND_ATMEL
bool "Support Atmel NAND controller"
imply SYS_NAND_USE_FLASH_BBT
help
Enable this driver for NAND flash platforms using an Atmel NAND
controller.
config NAND_DAVINCI
bool "Support TI Davinci NAND controller"
help
Enable this driver for NAND flash controllers available in TI Davinci
and Keystone2 platforms
config NAND_DENALI
bool
select SYS_NAND_SELF_INIT
imply CMD_NAND
config NAND_DENALI_DT
bool "Support Denali NAND controller as a DT device"
select NAND_DENALI
depends on OF_CONTROL && DM
help
Enable the driver for NAND flash on platforms using a Denali NAND
controller as a DT device.
config NAND_DENALI_SPARE_AREA_SKIP_BYTES
int "Number of bytes skipped in OOB area"
depends on NAND_DENALI
range 0 63
help
This option specifies the number of bytes to skip from the beginning
of OOB area before last ECC sector data starts. This is potentially
used to preserve the bad block marker in the OOB area.
config NAND_LPC32XX_SLC
bool "Support LPC32XX_SLC controller"
help
Enable the LPC32XX SLC NAND controller.
config NAND_OMAP_GPMC
bool "Support OMAP GPMC NAND controller"
depends on ARCH_OMAP2PLUS
help
Enables omap_gpmc.c driver for OMAPx and AMxxxx platforms.
GPMC controller is used for parallel NAND flash devices, and can
do ECC calculation (not ECC error detection) for HAM1, BCH4, BCH8
and BCH16 ECC algorithms.
config NAND_OMAP_GPMC_PREFETCH
bool "Enable GPMC Prefetch"
depends on NAND_OMAP_GPMC
default y
help
On OMAP platforms that use the GPMC controller
(CONFIG_NAND_OMAP_GPMC_PREFETCH), this option enables the code that
uses the prefetch mode to speed up read operations.
config NAND_OMAP_ELM
bool "Enable ELM driver for OMAPxx and AMxx platforms."
depends on NAND_OMAP_GPMC && !OMAP34XX
help
ELM controller is used for ECC error detection (not ECC calculation)
of BCH4, BCH8 and BCH16 ECC algorithms.
Some legacy platforms like OMAP3xx do not have in-built ELM h/w engine,
thus such SoC platforms need to depend on a software library for ECC error
detection. However, ECC calculation on such platforms would still be
done by the GPMC controller.
config NAND_VF610_NFC
bool "Support for Freescale NFC for VF610"
select SYS_NAND_SELF_INIT
imply CMD_NAND
help
Enables support for NAND Flash Controller on some Freescale
processors like the VF610, MCF54418 or Kinetis K70.
The driver supports a maximum 2k page size. The driver
currently does not support hardware ECC.
choice
prompt "Hardware ECC strength"
depends on NAND_VF610_NFC
default SYS_NAND_VF610_NFC_45_ECC_BYTES
help
Select the ECC strength used in the hardware BCH ECC block.
config SYS_NAND_VF610_NFC_45_ECC_BYTES
bool "24-error correction (45 ECC bytes)"
config SYS_NAND_VF610_NFC_60_ECC_BYTES
bool "32-error correction (60 ECC bytes)"
endchoice
config NAND_PXA3XX
bool "Support for NAND on PXA3xx and Armada 370/XP/38x"
select SYS_NAND_SELF_INIT
imply CMD_NAND
help
This enables the driver for the NAND flash device found on
PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
config NAND_SUNXI
bool "Support for NAND on Allwinner SoCs"
default ARCH_SUNXI
depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I || MACH_SUN8I
select SYS_NAND_SELF_INIT
select SYS_NAND_U_BOOT_LOCATIONS
select SPL_NAND_SUPPORT
imply CMD_NAND
---help---
Enable support for NAND. This option enables the standard and
SPL drivers.
The SPL driver only supports reading from the NAND using DMA
transfers.
if NAND_SUNXI
config NAND_SUNXI_SPL_ECC_STRENGTH
int "Allwinner NAND SPL ECC Strength"
default 64
config NAND_SUNXI_SPL_ECC_SIZE
int "Allwinner NAND SPL ECC Step Size"
default 1024
config NAND_SUNXI_SPL_USABLE_PAGE_SIZE
int "Allwinner NAND SPL Usable Page Size"
default 1024
endif
config NAND_ARASAN
bool "Configure Arasan Nand"
select SYS_NAND_SELF_INIT
imply CMD_NAND
help
This enables Nand driver support for Arasan nand flash
controller. This uses the hardware ECC for read and
write operations.
config NAND_MXC
bool "MXC NAND support"
depends on CPU_ARM926EJS || CPU_ARM1136 || MX5
imply CMD_NAND
help
This enables the NAND driver for the NAND flash controller on the
i.MX27 / i.MX31 / i.MX5 processors.
config NAND_MXS
bool "MXS NAND support"
depends on MX23 || MX28 || MX6 || MX7
select SYS_NAND_SELF_INIT
imply CMD_NAND
select APBH_DMA
select APBH_DMA_BURST if ARCH_MX6 || ARCH_MX7
select APBH_DMA_BURST8 if ARCH_MX6 || ARCH_MX7
help
This enables NAND driver for the NAND flash controller on the
MXS processors.
if NAND_MXS
config NAND_MXS_DT
bool "Support MXS NAND controller as a DT device"
depends on OF_CONTROL && MTD
help
Enable the driver for MXS NAND flash on platforms using
device tree.
config NAND_MXS_USE_MINIMUM_ECC
bool "Use minimum ECC strength supported by the controller"
default n
endif
config NAND_ZYNQ
bool "Support for Zynq Nand controller"
select SYS_NAND_SELF_INIT
imply CMD_NAND
help
This enables Nand driver support for Nand flash controller
found on Zynq SoC.
config NAND_ZYNQ_USE_BOOTLOADER1_TIMINGS
bool "Enable use of 1st stage bootloader timing for NAND"
depends on NAND_ZYNQ
help
This flag prevents U-Boot from reconfiguring the NAND flash controller
and reuses the NAND timings from the 1st stage bootloader.
comment "Generic NAND options"
config SYS_NAND_BLOCK_SIZE
hex "NAND chip eraseblock size"
depends on ARCH_SUNXI
help
Number of data bytes in one eraseblock for the NAND chip on the
board. This is the product of NAND_PAGE_SIZE and the number of
pages per eraseblock.
config SYS_NAND_PAGE_SIZE
hex "NAND chip page size"
depends on ARCH_SUNXI
help
Number of data bytes in one page for the NAND chip on the
board, not including the OOB area.
config SYS_NAND_OOBSIZE
hex "NAND chip OOB size"
depends on ARCH_SUNXI
help
Number of bytes in the Out-Of-Band area for the NAND chip on
the board.
# Enhance depends when converting drivers to Kconfig which use this config
# option (mxc_nand, ndfc, omap_gpmc).
config SYS_NAND_BUSWIDTH_16BIT
bool "Use 16-bit NAND interface"
depends on NAND_VF610_NFC || NAND_OMAP_GPMC || NAND_MXC || ARCH_DAVINCI
help
Indicates that the NAND device has a 16-bit wide data bus. In the absence
of this config, the bus width of the NAND device is assumed to be 8-bit and
is later determined by reading ONFI params.
The above config is useful when the NAND device's bus width information
cannot be determined from on-chip ONFI params, like in the following scenarios:
- SPL boot does not support reading of ONFI parameters. This is done to
keep the SPL code footprint small.
- In current U-Boot flow using nand_init(), driver initialization
happens in board_nand_init() which is called before any device probe
(nand_scan_ident + nand_scan_tail), thus device's ONFI parameters are
not available while configuring controller. So a static CONFIG_NAND_xx
is needed to know the device's bus-width in advance.
if SPL
config SYS_NAND_U_BOOT_LOCATIONS
bool "Define U-boot binaries locations in NAND"
help
Enable CONFIG_SYS_NAND_U_BOOT_OFFS through Kconfig.
This option should not be enabled when compiling U-Boot for boards
defining CONFIG_SYS_NAND_U_BOOT_OFFS in their include/configs/<board>.h
file.
config SYS_NAND_U_BOOT_OFFS
hex "Location in NAND to read U-Boot from"
default 0x800000 if NAND_SUNXI
depends on SYS_NAND_U_BOOT_LOCATIONS
help
Set the offset from the start of the NAND from which U-Boot should
be loaded.
config SYS_NAND_U_BOOT_OFFS_REDUND
hex "Location in NAND to read U-Boot from"
default SYS_NAND_U_BOOT_OFFS
depends on SYS_NAND_U_BOOT_LOCATIONS
help
Set the offset from the start of the NAND from which the redundant
U-Boot should be loaded.
config SPL_NAND_AM33XX_BCH
bool "Enables SPL-NAND driver which supports ELM based"
depends on NAND_OMAP_GPMC && !OMAP34XX
default y
help
This is useful for platforms which have an ELM hardware engine
and use NAND boot mode.
Some legacy platforms like OMAP3xx do not have in-built ELM h/w engine,
so those platforms should use CONFIG_SPL_NAND_SIMPLE for enabling
SPL-NAND driver with software ECC correction support.
config SPL_NAND_DENALI
bool "Support Denali NAND controller for SPL"
help
This is a small implementation of the Denali NAND controller
for use on SPL.
config SPL_NAND_SIMPLE
bool "Use simple SPL NAND driver"
depends on !SPL_NAND_AM33XX_BCH
help
Support for NAND boot using simple NAND drivers that
expose the cmd_ctrl() interface.
endif
endif # if NAND
source "drivers/mtd/nand/spi/Kconfig"

View file

@ -1,77 +1,5 @@
# SPDX-License-Identifier: GPL-2.0+
#
# (C) Copyright 2006
# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
ifdef CONFIG_SPL_BUILD
ifdef CONFIG_SPL_NAND_DRIVERS
NORMAL_DRIVERS=y
endif
obj-$(CONFIG_SPL_NAND_AM33XX_BCH) += am335x_spl_bch.o
obj-$(CONFIG_SPL_NAND_DENALI) += denali_spl.o
obj-$(CONFIG_SPL_NAND_SIMPLE) += nand_spl_simple.o
obj-$(CONFIG_SPL_NAND_LOAD) += nand_spl_load.o
obj-$(CONFIG_SPL_NAND_ECC) += nand_ecc.o
obj-$(CONFIG_SPL_NAND_BASE) += nand_base.o
obj-$(CONFIG_SPL_NAND_IDENT) += nand_ids.o nand_timings.o
obj-$(CONFIG_SPL_NAND_INIT) += nand.o
ifeq ($(CONFIG_SPL_ENV_SUPPORT),y)
obj-$(CONFIG_ENV_IS_IN_NAND) += nand_util.o
endif
else # not spl
NORMAL_DRIVERS=y
obj-y += nand.o
obj-y += nand_bbt.o
obj-y += nand_ids.o
obj-y += nand_util.o
obj-y += nand_ecc.o
obj-y += nand_base.o
obj-y += nand_timings.o
endif # not spl
ifdef NORMAL_DRIVERS
obj-$(CONFIG_NAND_ECC_BCH) += nand_bch.o
obj-$(CONFIG_NAND_ATMEL) += atmel_nand.o
obj-$(CONFIG_NAND_ARASAN) += arasan_nfc.o
obj-$(CONFIG_NAND_DAVINCI) += davinci_nand.o
obj-$(CONFIG_NAND_DENALI) += denali.o
obj-$(CONFIG_NAND_DENALI_DT) += denali_dt.o
obj-$(CONFIG_NAND_FSL_ELBC) += fsl_elbc_nand.o
obj-$(CONFIG_NAND_FSL_IFC) += fsl_ifc_nand.o
obj-$(CONFIG_NAND_FSL_UPM) += fsl_upm.o
obj-$(CONFIG_NAND_FSMC) += fsmc_nand.o
obj-$(CONFIG_NAND_KB9202) += kb9202_nand.o
obj-$(CONFIG_NAND_KIRKWOOD) += kirkwood_nand.o
obj-$(CONFIG_NAND_KMETER1) += kmeter1_nand.o
obj-$(CONFIG_NAND_LPC32XX_MLC) += lpc32xx_nand_mlc.o
obj-$(CONFIG_NAND_LPC32XX_SLC) += lpc32xx_nand_slc.o
obj-$(CONFIG_NAND_VF610_NFC) += vf610_nfc.o
obj-$(CONFIG_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_NAND_MXS) += mxs_nand.o
obj-$(CONFIG_NAND_MXS_DT) += mxs_nand_dt.o
obj-$(CONFIG_NAND_PXA3XX) += pxa3xx_nand.o
obj-$(CONFIG_NAND_SPEAR) += spr_nand.o
obj-$(CONFIG_TEGRA_NAND) += tegra_nand.o
obj-$(CONFIG_NAND_OMAP_GPMC) += omap_gpmc.o
obj-$(CONFIG_NAND_OMAP_ELM) += omap_elm.o
obj-$(CONFIG_NAND_PLAT) += nand_plat.o
obj-$(CONFIG_NAND_SUNXI) += sunxi_nand.o
obj-$(CONFIG_NAND_ZYNQ) += zynq_nand.o
else # minimal SPL drivers
obj-$(CONFIG_NAND_FSL_ELBC) += fsl_elbc_spl.o
obj-$(CONFIG_NAND_FSL_IFC) += fsl_ifc_spl.o
obj-$(CONFIG_NAND_MXC) += mxc_nand_spl.o
obj-$(CONFIG_NAND_MXS) += mxs_nand_spl.o mxs_nand.o
obj-$(CONFIG_NAND_SUNXI) += sunxi_nand_spl.o
endif # drivers
nandcore-objs := core.o bbt.o
obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
obj-$(CONFIG_MTD_SPI_NAND) += spi/

132
drivers/mtd/nand/bbt.c Normal file
View file

@ -0,0 +1,132 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017 Free Electrons
*
* Authors:
* Boris Brezillon <boris.brezillon@free-electrons.com>
* Peter Pan <peterpandong@micron.com>
*/
#define pr_fmt(fmt) "nand-bbt: " fmt
#include <linux/mtd/nand.h>
#ifndef __UBOOT__
#include <linux/slab.h>
#endif
/**
* nanddev_bbt_init() - Initialize the BBT (Bad Block Table)
* @nand: NAND device
*
* Initialize the in-memory BBT.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int nanddev_bbt_init(struct nand_device *nand)
{
unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
unsigned int nblocks = nanddev_neraseblocks(nand);
unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block,
BITS_PER_LONG);
/* the BBT cache is an array of longs, so size the allocation in longs */
nand->bbt.cache = kzalloc(nwords * sizeof(*nand->bbt.cache), GFP_KERNEL);
if (!nand->bbt.cache)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_init);
/**
* nanddev_bbt_cleanup() - Cleanup the BBT (Bad Block Table)
* @nand: NAND device
*
* Undoes what has been done in nanddev_bbt_init()
*/
void nanddev_bbt_cleanup(struct nand_device *nand)
{
kfree(nand->bbt.cache);
}
EXPORT_SYMBOL_GPL(nanddev_bbt_cleanup);
/**
* nanddev_bbt_update() - Update a BBT
* @nand: nand device
*
* Update the BBT. Currently a NOP function since on-flash bbt is not yet
* supported.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int nanddev_bbt_update(struct nand_device *nand)
{
return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_update);
/**
* nanddev_bbt_get_block_status() - Return the status of an eraseblock
* @nand: nand device
* @entry: the BBT entry
*
* Return: a positive nand_bbt_block_status value or -%ERANGE if @entry
* is bigger than the BBT size.
*/
int nanddev_bbt_get_block_status(const struct nand_device *nand,
unsigned int entry)
{
unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
unsigned long *pos = nand->bbt.cache +
((entry * bits_per_block) / BITS_PER_LONG);
unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
unsigned long status;
if (entry >= nanddev_neraseblocks(nand))
return -ERANGE;
status = pos[0] >> offs;
if (bits_per_block + offs > BITS_PER_LONG)
status |= pos[1] << (BITS_PER_LONG - offs);
return status & GENMASK(bits_per_block - 1, 0);
}
EXPORT_SYMBOL_GPL(nanddev_bbt_get_block_status);
/**
* nanddev_bbt_set_block_status() - Update the status of an eraseblock in the
* in-memory BBT
* @nand: nand device
* @entry: the BBT entry to update
* @status: the new status
*
* Update an entry of the in-memory BBT. If you want to push the updated BBT
* to the NAND you should call nanddev_bbt_update().
*
* Return: 0 in case of success or -%ERANGE if @entry is bigger than the BBT
* size.
*/
int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
enum nand_bbt_block_status status)
{
unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
unsigned long *pos = nand->bbt.cache +
((entry * bits_per_block) / BITS_PER_LONG);
unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
unsigned long val = status & GENMASK(bits_per_block - 1, 0);
if (entry >= nanddev_neraseblocks(nand))
return -ERANGE;
pos[0] &= ~GENMASK(offs + bits_per_block - 1, offs);
pos[0] |= val << offs;
if (bits_per_block + offs > BITS_PER_LONG) {
unsigned int rbits = bits_per_block + offs - BITS_PER_LONG;
pos[1] &= ~GENMASK(rbits - 1, 0);
pos[1] |= val >> rbits;
}
return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_set_block_status);
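Both accessors pack bits_per_block status bits per eraseblock into an array of unsigned longs, so an entry may straddle a word boundary; the pos[1] arms above handle exactly that. A standalone sketch of the index math, assuming NAND_BBT_BLOCK_NUM_STATUS is 5 (so fls() gives 3 bits per block) and 64-bit longs:
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

int main(void)
{
	unsigned int bits_per_block = 3;	/* fls(5) */
	unsigned int entry;

	for (entry = 20; entry <= 22; entry++) {
		unsigned int word = (entry * bits_per_block) / BITS_PER_LONG;
		unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;

		/* entry 21 starts at bit 63 of word 0, so its top bits
		 * spill into word 1 -- the pos[1] path above */
		printf("entry %u -> word %u, bit offset %u%s\n",
		       entry, word, offs,
		       bits_per_block + offs > BITS_PER_LONG ?
		       " (straddles words)" : "");
	}

	return 0;
}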

243
drivers/mtd/nand/core.c Normal file
View file

@ -0,0 +1,243 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017 Free Electrons
*
* Authors:
* Boris Brezillon <boris.brezillon@free-electrons.com>
* Peter Pan <peterpandong@micron.com>
*/
#define pr_fmt(fmt) "nand: " fmt
#ifndef __UBOOT__
#include <linux/module.h>
#endif
#include <linux/mtd/nand.h>
/**
* nanddev_isbad() - Check if a block is bad
* @nand: NAND device
* @pos: position pointing to the block we want to check
*
* Return: true if the block is bad, false otherwise.
*/
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
if (nanddev_bbt_is_initialized(nand)) {
unsigned int entry;
int status;
entry = nanddev_bbt_pos_to_entry(nand, pos);
status = nanddev_bbt_get_block_status(nand, entry);
/* Lazy block status retrieval */
if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
if (nand->ops->isbad(nand, pos))
status = NAND_BBT_BLOCK_FACTORY_BAD;
else
status = NAND_BBT_BLOCK_GOOD;
nanddev_bbt_set_block_status(nand, entry, status);
}
if (status == NAND_BBT_BLOCK_WORN ||
status == NAND_BBT_BLOCK_FACTORY_BAD)
return true;
return false;
}
return nand->ops->isbad(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_isbad);
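As a usage illustration only: scanning a whole device with the framework's position helpers (nanddev_offs_to_pos() and nanddev_pos_next_eraseblock() both appear in nanddev_mtd_erase() below) could look like this:
/* Sketch: walk every eraseblock and count the bad ones. */
static unsigned int count_bad_blocks(struct nand_device *nand)
{
	struct nand_pos pos;
	unsigned int i, bad = 0;

	nanddev_offs_to_pos(nand, 0, &pos);
	for (i = 0; i < nanddev_neraseblocks(nand); i++) {
		if (nanddev_isbad(nand, &pos))
			bad++;
		nanddev_pos_next_eraseblock(nand, &pos);
	}

	return bad;
}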
/**
* nanddev_markbad() - Mark a block as bad
* @nand: NAND device
* @pos: position of the block to mark bad
*
* Mark a block bad. This function updates the BBT if available and
* calls the low-level markbad hook (nand->ops->markbad()).
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
struct mtd_info *mtd = nanddev_to_mtd(nand);
unsigned int entry;
int ret = 0;
if (nanddev_isbad(nand, pos))
return 0;
ret = nand->ops->markbad(nand, pos);
if (ret)
pr_warn("failed to write BBM to block @%llx (err = %d)\n",
nanddev_pos_to_offs(nand, pos), ret);
if (!nanddev_bbt_is_initialized(nand))
goto out;
entry = nanddev_bbt_pos_to_entry(nand, pos);
ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
if (ret)
goto out;
ret = nanddev_bbt_update(nand);
out:
if (!ret)
mtd->ecc_stats.badblocks++;
return ret;
}
EXPORT_SYMBOL_GPL(nanddev_markbad);
/**
* nanddev_isreserved() - Check whether an eraseblock is reserved or not
* @nand: NAND device
* @pos: NAND position to test
*
* Checks whether the eraseblock pointed to by @pos is reserved or not.
*
* Return: true if the eraseblock is reserved, false otherwise.
*/
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
{
unsigned int entry;
int status;
if (!nanddev_bbt_is_initialized(nand))
return false;
/* Return info from the table */
entry = nanddev_bbt_pos_to_entry(nand, pos);
status = nanddev_bbt_get_block_status(nand, entry);
return status == NAND_BBT_BLOCK_RESERVED;
}
EXPORT_SYMBOL_GPL(nanddev_isreserved);
/**
* nanddev_erase() - Erase a NAND portion
* @nand: NAND device
* @pos: position of the block to erase
*
* Erases the block if it's not bad.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
{
if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
pr_warn("attempt to erase a bad/reserved block @%llx\n",
nanddev_pos_to_offs(nand, pos));
return -EIO;
}
return nand->ops->erase(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_erase);
/**
* nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
* @mtd: MTD device
* @einfo: erase request
*
* This is a simple mtd->_erase() implementation iterating over all blocks
* concerned by @einfo and calling nand->ops->erase() on each of them.
*
* Note that mtd->_erase should not be directly assigned to this helper,
* because there's no locking here. NAND specialized layers should instead
* implement their own wrapper around nanddev_mtd_erase() taking the
* appropriate lock before calling nanddev_mtd_erase().
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
{
struct nand_device *nand = mtd_to_nanddev(mtd);
struct nand_pos pos, last;
int ret;
nanddev_offs_to_pos(nand, einfo->addr, &pos);
nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
while (nanddev_pos_cmp(&pos, &last) <= 0) {
ret = nanddev_erase(nand, &pos);
if (ret) {
einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
return ret;
}
nanddev_pos_next_eraseblock(nand, &pos);
}
return 0;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
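The SPI NAND core (its diff is suppressed later in this commit) is expected to provide the wrapper this kernel-doc asks for. A hedged sketch of the pattern; the mutex field name is an assumption:
/* Sketch of a specialized layer's locking wrapper. */
static int spinand_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}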
/**
* nanddev_init() - Initialize a NAND device
* @nand: NAND device
* @ops: NAND device operations
* @owner: NAND device owner
*
* Initializes a NAND device object. Consistency checks are done on @ops and
* @nand->memorg. Also takes care of initializing the BBT.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
struct module *owner)
{
struct mtd_info *mtd = nanddev_to_mtd(nand);
struct nand_memory_organization *memorg = nanddev_get_memorg(nand);
if (!nand || !ops)
return -EINVAL;
if (!ops->erase || !ops->markbad || !ops->isbad)
return -EINVAL;
if (!memorg->bits_per_cell || !memorg->pagesize ||
!memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
!memorg->planes_per_lun || !memorg->luns_per_target ||
!memorg->ntargets)
return -EINVAL;
nand->rowconv.eraseblock_addr_shift =
fls(memorg->pages_per_eraseblock - 1);
nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
nand->rowconv.eraseblock_addr_shift;
nand->ops = ops;
mtd->type = memorg->bits_per_cell == 1 ?
MTD_NANDFLASH : MTD_MLCNANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
mtd->writesize = memorg->pagesize;
mtd->writebufsize = memorg->pagesize;
mtd->oobsize = memorg->oobsize;
mtd->size = nanddev_size(nand);
mtd->owner = owner;
return nanddev_bbt_init(nand);
}
EXPORT_SYMBOL_GPL(nanddev_init);
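A worked check of the two shifts set up above, using the memory organization of the Macronix MX35LF1GE4AB added later in this commit (64 pages per eraseblock, 1024 eraseblocks per LUN):
#include <stdio.h>

/* portable stand-in for the kernel's fls() */
static unsigned int fls_u(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}

	return r;
}

int main(void)
{
	unsigned int eb_shift = fls_u(64 - 1);			/* 6 */
	unsigned int lun_shift = eb_shift + fls_u(1024 - 1);	/* 6 + 10 = 16 */

	printf("eraseblock_addr_shift=%u lun_addr_shift=%u\n",
	       eb_shift, lun_shift);

	return 0;
}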
/**
* nanddev_cleanup() - Release resources allocated in nanddev_init()
* @nand: NAND device
*
* Basically undoes what has been done in nanddev_init().
*/
void nanddev_cleanup(struct nand_device *nand)
{
if (nanddev_bbt_is_initialized(nand))
nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);
MODULE_DESCRIPTION("Generic NAND framework");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_LICENSE("GPL v2");

View file

@ -0,0 +1,297 @@
menuconfig NAND
bool "Raw NAND Device Support"
if NAND
config SYS_NAND_SELF_INIT
bool
help
This option, if enabled, provides more flexible and linux-like
NAND initialization process.
config NAND_ATMEL
bool "Support Atmel NAND controller"
imply SYS_NAND_USE_FLASH_BBT
help
Enable this driver for NAND flash platforms using an Atmel NAND
controller.
config NAND_DAVINCI
bool "Support TI Davinci NAND controller"
help
Enable this driver for NAND flash controllers available in TI Davinci
and Keystone2 platforms
config NAND_DENALI
bool
select SYS_NAND_SELF_INIT
imply CMD_NAND
config NAND_DENALI_DT
bool "Support Denali NAND controller as a DT device"
select NAND_DENALI
depends on OF_CONTROL && DM
help
Enable the driver for NAND flash on platforms using a Denali NAND
controller as a DT device.
config NAND_DENALI_SPARE_AREA_SKIP_BYTES
int "Number of bytes skipped in OOB area"
depends on NAND_DENALI
range 0 63
help
This option specifies the number of bytes to skip from the beginning
of OOB area before last ECC sector data starts. This is potentially
used to preserve the bad block marker in the OOB area.
config NAND_LPC32XX_SLC
bool "Support LPC32XX_SLC controller"
help
Enable the LPC32XX SLC NAND controller.
config NAND_OMAP_GPMC
bool "Support OMAP GPMC NAND controller"
depends on ARCH_OMAP2PLUS
help
Enables omap_gpmc.c driver for OMAPx and AMxxxx platforms.
GPMC controller is used for parallel NAND flash devices, and can
do ECC calculation (not ECC error detection) for HAM1, BCH4, BCH8
and BCH16 ECC algorithms.
config NAND_OMAP_GPMC_PREFETCH
bool "Enable GPMC Prefetch"
depends on NAND_OMAP_GPMC
default y
help
On OMAP platforms that use the GPMC controller
(CONFIG_NAND_OMAP_GPMC_PREFETCH), this option enables the code that
uses the prefetch mode to speed up read operations.
config NAND_OMAP_ELM
bool "Enable ELM driver for OMAPxx and AMxx platforms."
depends on NAND_OMAP_GPMC && !OMAP34XX
help
ELM controller is used for ECC error detection (not ECC calculation)
of BCH4, BCH8 and BCH16 ECC algorithms.
Some legacy platforms like OMAP3xx do not have in-built ELM h/w engine,
thus such SoC platforms need to depend on a software library for ECC error
detection. However, ECC calculation on such platforms would still be
done by the GPMC controller.
config NAND_VF610_NFC
bool "Support for Freescale NFC for VF610"
select SYS_NAND_SELF_INIT
imply CMD_NAND
help
Enables support for NAND Flash Controller on some Freescale
processors like the VF610, MCF54418 or Kinetis K70.
The driver supports a maximum 2k page size. The driver
currently does not support hardware ECC.
choice
prompt "Hardware ECC strength"
depends on NAND_VF610_NFC
default SYS_NAND_VF610_NFC_45_ECC_BYTES
help
Select the ECC strength used in the hardware BCH ECC block.
config SYS_NAND_VF610_NFC_45_ECC_BYTES
bool "24-error correction (45 ECC bytes)"
config SYS_NAND_VF610_NFC_60_ECC_BYTES
bool "32-error correction (60 ECC bytes)"
endchoice
config NAND_PXA3XX
bool "Support for NAND on PXA3xx and Armada 370/XP/38x"
select SYS_NAND_SELF_INIT
imply CMD_NAND
help
This enables the driver for the NAND flash device found on
PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
config NAND_SUNXI
bool "Support for NAND on Allwinner SoCs"
default ARCH_SUNXI
depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I || MACH_SUN8I
select SYS_NAND_SELF_INIT
select SYS_NAND_U_BOOT_LOCATIONS
select SPL_NAND_SUPPORT
imply CMD_NAND
---help---
Enable support for NAND. This option enables the standard and
SPL drivers.
The SPL driver only supports reading from the NAND using DMA
transfers.
if NAND_SUNXI
config NAND_SUNXI_SPL_ECC_STRENGTH
int "Allwinner NAND SPL ECC Strength"
default 64
config NAND_SUNXI_SPL_ECC_SIZE
int "Allwinner NAND SPL ECC Step Size"
default 1024
config NAND_SUNXI_SPL_USABLE_PAGE_SIZE
int "Allwinner NAND SPL Usable Page Size"
default 1024
endif
config NAND_ARASAN
bool "Configure Arasan Nand"
select SYS_NAND_SELF_INIT
imply CMD_NAND
help
This enables Nand driver support for Arasan nand flash
controller. This uses the hardware ECC for read and
write operations.
config NAND_MXC
bool "MXC NAND support"
depends on CPU_ARM926EJS || CPU_ARM1136 || MX5
imply CMD_NAND
help
This enables the NAND driver for the NAND flash controller on the
i.MX27 / i.MX31 / i.MX5 processors.
config NAND_MXS
bool "MXS NAND support"
depends on MX23 || MX28 || MX6 || MX7
select SYS_NAND_SELF_INIT
imply CMD_NAND
select APBH_DMA
select APBH_DMA_BURST if ARCH_MX6 || ARCH_MX7
select APBH_DMA_BURST8 if ARCH_MX6 || ARCH_MX7
help
This enables NAND driver for the NAND flash controller on the
MXS processors.
if NAND_MXS
config NAND_MXS_DT
bool "Support MXS NAND controller as a DT device"
depends on OF_CONTROL && MTD
help
Enable the driver for MXS NAND flash on platforms using
device tree.
config NAND_MXS_USE_MINIMUM_ECC
bool "Use minimum ECC strength supported by the controller"
default n
endif
config NAND_ZYNQ
bool "Support for Zynq Nand controller"
select SYS_NAND_SELF_INIT
imply CMD_NAND
help
This enables Nand driver support for Nand flash controller
found on Zynq SoC.
config NAND_ZYNQ_USE_BOOTLOADER1_TIMINGS
bool "Enable use of 1st stage bootloader timing for NAND"
depends on NAND_ZYNQ
help
This flag prevents U-Boot from reconfiguring the NAND flash controller
and reuses the NAND timings from the 1st stage bootloader.
comment "Generic NAND options"
config SYS_NAND_BLOCK_SIZE
hex "NAND chip eraseblock size"
depends on ARCH_SUNXI
help
Number of data bytes in one eraseblock for the NAND chip on the
board. This is the product of NAND_PAGE_SIZE and the number of
pages per eraseblock.
config SYS_NAND_PAGE_SIZE
hex "NAND chip page size"
depends on ARCH_SUNXI
help
Number of data bytes in one page for the NAND chip on the
board, not including the OOB area.
config SYS_NAND_OOBSIZE
hex "NAND chip OOB size"
depends on ARCH_SUNXI
help
Number of bytes in the Out-Of-Band area for the NAND chip on
the board.
# Enhance depends when converting drivers to Kconfig which use this config
# option (mxc_nand, ndfc, omap_gpmc).
config SYS_NAND_BUSWIDTH_16BIT
bool "Use 16-bit NAND interface"
depends on NAND_VF610_NFC || NAND_OMAP_GPMC || NAND_MXC || ARCH_DAVINCI
help
Indicates that the NAND device has a 16-bit wide data bus. In the absence
of this config, the bus width of the NAND device is assumed to be 8-bit and
is later determined by reading ONFI params.
The above config is useful when the NAND device's bus width information
cannot be determined from on-chip ONFI params, like in the following scenarios:
- SPL boot does not support reading of ONFI parameters. This is done to
keep the SPL code footprint small.
- In current U-Boot flow using nand_init(), driver initialization
happens in board_nand_init() which is called before any device probe
(nand_scan_ident + nand_scan_tail), thus device's ONFI parameters are
not available while configuring controller. So a static CONFIG_NAND_xx
is needed to know the device's bus-width in advance.
if SPL
config SYS_NAND_U_BOOT_LOCATIONS
bool "Define U-boot binaries locations in NAND"
help
Enable CONFIG_SYS_NAND_U_BOOT_OFFS through Kconfig.
This option should not be enabled when compiling U-Boot for boards
defining CONFIG_SYS_NAND_U_BOOT_OFFS in their include/configs/<board>.h
file.
config SYS_NAND_U_BOOT_OFFS
hex "Location in NAND to read U-Boot from"
default 0x800000 if NAND_SUNXI
depends on SYS_NAND_U_BOOT_LOCATIONS
help
Set the offset from the start of the NAND from which U-Boot should
be loaded.
config SYS_NAND_U_BOOT_OFFS_REDUND
hex "Location in NAND to read U-Boot from"
default SYS_NAND_U_BOOT_OFFS
depends on SYS_NAND_U_BOOT_LOCATIONS
help
Set the offset from the start of the NAND from which the redundant
U-Boot should be loaded.
config SPL_NAND_AM33XX_BCH
bool "Enables SPL-NAND driver which supports ELM based"
depends on NAND_OMAP_GPMC && !OMAP34XX
default y
help
This is useful for platforms which have an ELM hardware engine
and use NAND boot mode.
Some legacy platforms like OMAP3xx do not have in-built ELM h/w engine,
so those platforms should use CONFIG_SPL_NAND_SIMPLE for enabling
SPL-NAND driver with software ECC correction support.
config SPL_NAND_DENALI
bool "Support Denali NAND controller for SPL"
help
This is a small implementation of the Denali NAND controller
for use on SPL.
config SPL_NAND_SIMPLE
bool "Use simple SPL NAND driver"
depends on !SPL_NAND_AM33XX_BCH
help
Support for NAND boot using simple NAND drivers that
expose the cmd_ctrl() interface.
endif
endif # if NAND

View file

@ -0,0 +1,77 @@
# SPDX-License-Identifier: GPL-2.0+
#
# (C) Copyright 2006
# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
ifdef CONFIG_SPL_BUILD
ifdef CONFIG_SPL_NAND_DRIVERS
NORMAL_DRIVERS=y
endif
obj-$(CONFIG_SPL_NAND_AM33XX_BCH) += am335x_spl_bch.o
obj-$(CONFIG_SPL_NAND_DENALI) += denali_spl.o
obj-$(CONFIG_SPL_NAND_SIMPLE) += nand_spl_simple.o
obj-$(CONFIG_SPL_NAND_LOAD) += nand_spl_load.o
obj-$(CONFIG_SPL_NAND_ECC) += nand_ecc.o
obj-$(CONFIG_SPL_NAND_BASE) += nand_base.o
obj-$(CONFIG_SPL_NAND_IDENT) += nand_ids.o nand_timings.o
obj-$(CONFIG_SPL_NAND_INIT) += nand.o
ifeq ($(CONFIG_SPL_ENV_SUPPORT),y)
obj-$(CONFIG_ENV_IS_IN_NAND) += nand_util.o
endif
else # not spl
NORMAL_DRIVERS=y
obj-y += nand.o
obj-y += nand_bbt.o
obj-y += nand_ids.o
obj-y += nand_util.o
obj-y += nand_ecc.o
obj-y += nand_base.o
obj-y += nand_timings.o
endif # not spl
ifdef NORMAL_DRIVERS
obj-$(CONFIG_NAND_ECC_BCH) += nand_bch.o
obj-$(CONFIG_NAND_ATMEL) += atmel_nand.o
obj-$(CONFIG_NAND_ARASAN) += arasan_nfc.o
obj-$(CONFIG_NAND_DAVINCI) += davinci_nand.o
obj-$(CONFIG_NAND_DENALI) += denali.o
obj-$(CONFIG_NAND_DENALI_DT) += denali_dt.o
obj-$(CONFIG_NAND_FSL_ELBC) += fsl_elbc_nand.o
obj-$(CONFIG_NAND_FSL_IFC) += fsl_ifc_nand.o
obj-$(CONFIG_NAND_FSL_UPM) += fsl_upm.o
obj-$(CONFIG_NAND_FSMC) += fsmc_nand.o
obj-$(CONFIG_NAND_KB9202) += kb9202_nand.o
obj-$(CONFIG_NAND_KIRKWOOD) += kirkwood_nand.o
obj-$(CONFIG_NAND_KMETER1) += kmeter1_nand.o
obj-$(CONFIG_NAND_LPC32XX_MLC) += lpc32xx_nand_mlc.o
obj-$(CONFIG_NAND_LPC32XX_SLC) += lpc32xx_nand_slc.o
obj-$(CONFIG_NAND_VF610_NFC) += vf610_nfc.o
obj-$(CONFIG_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_NAND_MXS) += mxs_nand.o
obj-$(CONFIG_NAND_MXS_DT) += mxs_nand_dt.o
obj-$(CONFIG_NAND_PXA3XX) += pxa3xx_nand.o
obj-$(CONFIG_NAND_SPEAR) += spr_nand.o
obj-$(CONFIG_TEGRA_NAND) += tegra_nand.o
obj-$(CONFIG_NAND_OMAP_GPMC) += omap_gpmc.o
obj-$(CONFIG_NAND_OMAP_ELM) += omap_elm.o
obj-$(CONFIG_NAND_PLAT) += nand_plat.o
obj-$(CONFIG_NAND_SUNXI) += sunxi_nand.o
obj-$(CONFIG_NAND_ZYNQ) += zynq_nand.o
else # minimal SPL drivers
obj-$(CONFIG_NAND_FSL_ELBC) += fsl_elbc_spl.o
obj-$(CONFIG_NAND_FSL_IFC) += fsl_ifc_spl.o
obj-$(CONFIG_NAND_MXC) += mxc_nand_spl.o
obj-$(CONFIG_NAND_MXS) += mxs_nand_spl.o mxs_nand.o
obj-$(CONFIG_NAND_SUNXI) += sunxi_nand_spl.o
endif # drivers

View file

@ -9,7 +9,7 @@
/*
*
* linux/drivers/mtd/nand/nand_davinci.c
* linux/drivers/mtd/nand/raw/nand_davinci.c
*
* NAND Flash Driver
*

View file

@ -1863,33 +1863,6 @@ read_retry:
return max_bitflips;
}
/**
* nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
* @mtd: MTD device structure
* @from: offset to read from
* @len: number of bytes to read
* @retlen: pointer to variable to store the number of read bytes
* @buf: the databuffer to put data
*
* Get hold of the chip and call nand_do_read.
*/
static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, uint8_t *buf)
{
struct mtd_oob_ops ops;
int ret;
nand_get_device(mtd, FL_READING);
memset(&ops, 0, sizeof(ops));
ops.len = len;
ops.datbuf = buf;
ops.mode = MTD_OPS_PLACE_OOB;
ret = nand_do_read_ops(mtd, from, &ops);
*retlen = ops.retlen;
nand_release_device(mtd);
return ret;
}
/**
* nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
* @mtd: mtd info structure
@ -2674,33 +2647,6 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
return ret;
}
/**
* nand_write - [MTD Interface] NAND write with ECC
* @mtd: MTD device structure
* @to: offset to write to
* @len: number of bytes to write
* @retlen: pointer to variable to store the number of written bytes
* @buf: the data to write
*
* NAND write with ECC.
*/
static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf)
{
struct mtd_oob_ops ops;
int ret;
nand_get_device(mtd, FL_WRITING);
memset(&ops, 0, sizeof(ops));
ops.len = len;
ops.datbuf = (uint8_t *)buf;
ops.mode = MTD_OPS_PLACE_OOB;
ret = nand_do_write_ops(mtd, to, &ops);
*retlen = ops.retlen;
nand_release_device(mtd);
return ret;
}
/**
* nand_do_write_oob - [MTD Interface] NAND write out-of-band
* @mtd: MTD device structure
@ -4620,8 +4566,6 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
MTD_CAP_NANDFLASH;
mtd->_erase = nand_erase;
mtd->_read = nand_read;
mtd->_write = nand_write;
mtd->_panic_write = panic_nand_write;
mtd->_read_oob = nand_read_oob;
mtd->_write_oob = nand_write_oob;

View file

@ -3,7 +3,7 @@
* This file contains an ECC algorithm from Toshiba that detects and
* corrects 1 bit errors in a 256 byte block of data.
*
* drivers/mtd/nand/nand_ecc.c
* drivers/mtd/nand/raw/nand_ecc.c
*
* Copyright (C) 2000-2004 Steven J. Hill (sjhill@realitydiluted.com)
* Toshiba America Electronics Components, Inc.

View file

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/mtd/nand/nand_util.c
* drivers/mtd/nand/raw/nand_util.c
*
* Copyright (C) 2006 by Weiss-Electronic GmbH.
* All rights reserved.

View file

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/mtd/nand/pxa3xx_nand.c
* drivers/mtd/nand/raw/pxa3xx_nand.c
*
* Copyright © 2005 Intel Corporation
* Copyright © 2006 Marvell International Ltd.

View file

@ -0,0 +1,7 @@
menuconfig MTD_SPI_NAND
bool "SPI NAND device Support"
depends on MTD && DM_SPI
select MTD_NAND_CORE
select SPI_MEM
help
This is the framework for the SPI NAND device drivers.
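For reference, a board defconfig pulling in the new stack would enable roughly the following (the exact set depends on the board; CONFIG_MTD_SPI_NAND itself selects MTD_NAND_CORE and SPI_MEM):
CONFIG_MTD=y
CONFIG_DM_SPI=y
CONFIG_MTD_SPI_NAND=y
CONFIG_CMD_MTD=y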

View file

@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
spinand-objs := core.o macronix.o micron.o winbond.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o

1254
drivers/mtd/nand/spi/core.c Normal file

File diff suppressed because it is too large

View file

@ -0,0 +1,146 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018 Macronix
*
* Author: Boris Brezillon <boris.brezillon@bootlin.com>
*/
#ifndef __UBOOT__
#include <linux/device.h>
#include <linux/kernel.h>
#endif
#include <linux/mtd/spinand.h>
#define SPINAND_MFR_MACRONIX 0xC2
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
static int mx35lfxge4ab_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
return -ERANGE;
}
static int mx35lfxge4ab_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
region->offset = 2;
region->length = mtd->oobsize - 2;
return 0;
}
static const struct mtd_ooblayout_ops mx35lfxge4ab_ooblayout = {
.ecc = mx35lfxge4ab_ooblayout_ecc,
.free = mx35lfxge4ab_ooblayout_free,
};
static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
{
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_DUMMY(1, 1),
SPI_MEM_OP_DATA_IN(1, eccsr, 1));
return spi_mem_exec_op(spinand->slave, &op);
}
static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 eccsr;
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
return 0;
case STATUS_ECC_UNCOR_ERROR:
return -EBADMSG;
case STATUS_ECC_HAS_BITFLIPS:
/*
* Let's try to retrieve the real maximum number of bitflips
* in order to avoid forcing the wear-leveling layer to move
* data around if it's not necessary.
*/
if (mx35lf1ge4ab_get_eccsr(spinand, &eccsr))
return nand->eccreq.strength;
if (WARN_ON(eccsr > nand->eccreq.strength || !eccsr))
return nand->eccreq.strength;
return eccsr;
default:
break;
}
return -EINVAL;
}
static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO("MX35LF1GE4AB", 0x12,
NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
mx35lf1ge4ab_ecc_get_status)),
SPINAND_INFO("MX35LF2GE4AB", 0x22,
NAND_MEMORG(1, 2048, 64, 64, 2048, 2, 1, 1),
NAND_ECCREQ(4, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
};
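Assuming NAND_MEMORG() takes (bits_per_cell, pagesize, oobsize, pages_per_eraseblock, eraseblocks_per_lun, planes_per_lun, luns_per_target, ntargets), as in the Linux framework this code mirrors, the entries check out against the part names:
/* MX35LF1GE4AB: 2048 B/page * 64 pages/block * 1024 blocks/LUN
 *             = 128 MiB = 1 Gbit, matching the "1G" in the name;
 * MX35LF2GE4AB doubles eraseblocks_per_lun to 2048 -> 2 Gbit. */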
static int macronix_spinand_detect(struct spinand_device *spinand)
{
u8 *id = spinand->id.data;
int ret;
/*
* Macronix SPI NAND read ID needs a dummy byte, so the first byte in
* raw_id is garbage.
*/
if (id[1] != SPINAND_MFR_MACRONIX)
return 0;
ret = spinand_match_and_init(spinand, macronix_spinand_table,
ARRAY_SIZE(macronix_spinand_table),
id[2]);
if (ret)
return ret;
return 1;
}
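The ID buffer layout assumed by this hook (and by the other detect hooks in this series):
/* id[0]: dummy byte clocked out first (see the comment above),
 * id[1]: manufacturer ID (0xC2 for Macronix),
 * id[2]: device ID matched against the table
 *        (0x12 -> MX35LF1GE4AB, 0x22 -> MX35LF2GE4AB). */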
static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
.detect = macronix_spinand_detect,
};
const struct spinand_manufacturer macronix_spinand_manufacturer = {
.id = SPINAND_MFR_MACRONIX,
.name = "Macronix",
.ops = &macronix_spinand_manuf_ops,
};

View file

@ -0,0 +1,135 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016-2017 Micron Technology, Inc.
*
* Authors:
* Peter Pan <peterpandong@micron.com>
*/
#ifndef __UBOOT__
#include <linux/device.h>
#include <linux/kernel.h>
#endif
#include <linux/mtd/spinand.h>
#define SPINAND_MFR_MICRON 0x2c
#define MICRON_STATUS_ECC_MASK GENMASK(7, 4)
#define MICRON_STATUS_ECC_NO_BITFLIPS (0 << 4)
#define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4)
#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
#define MICRON_STATUS_ECC_7TO8_BITFLIPS (5 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
static int mt29f2g01abagd_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
region->offset = 64;
region->length = 64;
return 0;
}
static int mt29f2g01abagd_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section)
return -ERANGE;
/* Reserve 2 bytes for the BBM. */
region->offset = 2;
region->length = 62;
return 0;
}
static const struct mtd_ooblayout_ops mt29f2g01abagd_ooblayout = {
.ecc = mt29f2g01abagd_ooblayout_ecc,
.free = mt29f2g01abagd_ooblayout_free,
};
static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
switch (status & MICRON_STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
return 0;
case STATUS_ECC_UNCOR_ERROR:
return -EBADMSG;
case MICRON_STATUS_ECC_1TO3_BITFLIPS:
return 3;
case MICRON_STATUS_ECC_4TO6_BITFLIPS:
return 6;
case MICRON_STATUS_ECC_7TO8_BITFLIPS:
return 8;
default:
break;
}
return -EINVAL;
}
static const struct spinand_info micron_spinand_table[] = {
SPINAND_INFO("MT29F2G01ABAGD", 0x24,
NAND_MEMORG(1, 2048, 128, 64, 2048, 2, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&mt29f2g01abagd_ooblayout,
mt29f2g01abagd_ecc_get_status)),
};
static int micron_spinand_detect(struct spinand_device *spinand)
{
u8 *id = spinand->id.data;
int ret;
/*
* Micron SPI NAND read ID needs a dummy byte,
* so the first byte in raw_id is dummy.
*/
if (id[1] != SPINAND_MFR_MICRON)
return 0;
ret = spinand_match_and_init(spinand, micron_spinand_table,
ARRAY_SIZE(micron_spinand_table), id[2]);
if (ret)
return ret;
return 1;
}
static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = {
.detect = micron_spinand_detect,
};
const struct spinand_manufacturer micron_spinand_manufacturer = {
.id = SPINAND_MFR_MICRON,
.name = "Micron",
.ops = &micron_spinand_manuf_ops,
};

View file

@ -0,0 +1,143 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017 exceet electronics GmbH
*
* Authors:
* Frieder Schrempf <frieder.schrempf@exceet.de>
* Boris Brezillon <boris.brezillon@bootlin.com>
*/
#ifndef __UBOOT__
#include <linux/device.h>
#include <linux/kernel.h>
#endif
#include <linux/mtd/spinand.h>
#define SPINAND_MFR_WINBOND 0xEF
#define WINBOND_CFG_BUF_READ BIT(3)
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
SPINAND_PROG_LOAD(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
SPINAND_PROG_LOAD(false, 0, NULL, 0));
static int w25m02gv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 3)
return -ERANGE;
region->offset = (16 * section) + 8;
region->length = 8;
return 0;
}
static int w25m02gv_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
if (section > 3)
return -ERANGE;
region->offset = (16 * section) + 2;
region->length = 6;
return 0;
}
static const struct mtd_ooblayout_ops w25m02gv_ooblayout = {
.ecc = w25m02gv_ooblayout_ecc,
.free = w25m02gv_ooblayout_free,
};
static int w25m02gv_select_target(struct spinand_device *spinand,
unsigned int target)
{
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0xc2, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(1,
spinand->scratchbuf,
1));
*spinand->scratchbuf = target;
return spi_mem_exec_op(spinand->slave, &op);
}
static const struct spinand_info winbond_spinand_table[] = {
SPINAND_INFO("W25M02GV", 0xAB,
NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 2),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
SPINAND_SELECT_TARGET(w25m02gv_select_target)),
};
/**
* winbond_spinand_detect - initialize device related part in spinand_device
* struct if it is a Winbond device.
* @spinand: SPI NAND device structure
*/
static int winbond_spinand_detect(struct spinand_device *spinand)
{
u8 *id = spinand->id.data;
int ret;
/*
* Winbond SPI NAND read ID needs a dummy byte,
* so the first byte in raw_id is dummy.
*/
if (id[1] != SPINAND_MFR_WINBOND)
return 0;
ret = spinand_match_and_init(spinand, winbond_spinand_table,
ARRAY_SIZE(winbond_spinand_table), id[2]);
if (ret)
return ret;
return 1;
}
static int winbond_spinand_init(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int i;
/*
* Make sure all dies are in buffer read mode and not continuous read
* mode.
*/
for (i = 0; i < nand->memorg.ntargets; i++) {
spinand_select_target(spinand, i);
spinand_upd_cfg(spinand, WINBOND_CFG_BUF_READ,
WINBOND_CFG_BUF_READ);
}
return 0;
}
static const struct spinand_manufacturer_ops winbond_spinand_manuf_ops = {
.detect = winbond_spinand_detect,
.init = winbond_spinand_init,
};
const struct spinand_manufacturer winbond_spinand_manufacturer = {
.id = SPINAND_MFR_WINBOND,
.name = "Winbond",
.ops = &winbond_spinand_manuf_ops,
};

View file

@ -2656,8 +2656,6 @@ int onenand_probe(struct mtd_info *mtd)
mtd->flags = MTD_CAP_NANDFLASH;
mtd->_erase = onenand_erase;
mtd->_read = onenand_read;
mtd->_write = onenand_write;
mtd->_read_oob = onenand_read_oob;
mtd->_write_oob = onenand_write_oob;
mtd->_sync = onenand_sync;

View file

@ -18,6 +18,13 @@ config DM_SPI
if DM_SPI
config SPI_MEM
bool "SPI memory extension"
help
Enable this option if you want to enable the SPI memory extension.
This extension is meant to simplify interaction with SPI memories
by providing a high-level interface to send memory-like commands.
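As an illustration of the extension, here is a memory-like command built with the same SPI_MEM_OP() helpers the SPI NAND drivers in this commit use; a sketch assuming a probed spi_slave and the spi-mem header added by this series:
/* Sketch only: read a 3-byte JEDEC ID through the spi-mem API. */
static int read_jedec_id(struct spi_slave *slave, u8 *id)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(3, id, 1));

	return spi_mem_exec_op(slave, &op);
}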
config ALTERA_SPI
bool "Altera SPI driver"
help

View file

@ -8,6 +8,7 @@ ifdef CONFIG_DM_SPI
obj-y += spi-uclass.o
obj-$(CONFIG_SANDBOX) += spi-emul-uclass.o
obj-$(CONFIG_SOFT_SPI) += soft_spi.o
obj-$(CONFIG_SPI_MEM) += spi-mem.o
else
obj-y += spi.o
obj-$(CONFIG_SOFT_SPI) += soft_spi_legacy.o

View file

@ -17,6 +17,7 @@
#include <malloc.h>
#include <spi.h>
#include <fdtdec.h>
#include <reset.h>
#include <linux/compat.h>
#include <linux/iopoll.h>
#include <asm/io.h>
@ -111,6 +112,8 @@ struct dw_spi_priv {
void *tx_end;
void *rx;
void *rx_end;
struct reset_ctl_bulk resets;
};
static inline u32 dw_read(struct dw_spi_priv *priv, u32 offset)
@ -231,6 +234,34 @@ err_rate:
return -EINVAL;
}
static int dw_spi_reset(struct udevice *bus)
{
int ret;
struct dw_spi_priv *priv = dev_get_priv(bus);
ret = reset_get_bulk(bus, &priv->resets);
if (ret) {
/*
* Return 0 if the error is due to !CONFIG_DM_RESET or the reset
* DT property not being present.
*/
if (ret == -ENOENT || ret == -ENOTSUPP)
return 0;
dev_warn(bus, "Can't get reset: %d\n", ret);
return ret;
}
ret = reset_deassert_bulk(&priv->resets);
if (ret) {
reset_release_bulk(&priv->resets);
dev_err(bus, "Failed to reset: %d\n", ret);
return ret;
}
return 0;
}
static int dw_spi_probe(struct udevice *bus)
{
struct dw_spi_platdata *plat = dev_get_platdata(bus);
@ -244,6 +275,10 @@ static int dw_spi_probe(struct udevice *bus)
if (ret)
return ret;
ret = dw_spi_reset(bus);
if (ret)
return ret;
/* Currently only bits_per_word == 8 supported */
priv->bits_per_word = 8;
@ -478,6 +513,13 @@ static int dw_spi_set_mode(struct udevice *bus, uint mode)
return 0;
}
static int dw_spi_remove(struct udevice *bus)
{
struct dw_spi_priv *priv = dev_get_priv(bus);
return reset_release_bulk(&priv->resets);
}
static const struct dm_spi_ops dw_spi_ops = {
.xfer = dw_spi_xfer,
.set_speed = dw_spi_set_speed,
@ -502,4 +544,5 @@ U_BOOT_DRIVER(dw_spi) = {
.platdata_auto_alloc_size = sizeof(struct dw_spi_platdata),
.priv_auto_alloc_size = sizeof(struct dw_spi_priv),
.probe = dw_spi_probe,
.remove = dw_spi_remove,
};

View file

@ -84,7 +84,6 @@ DECLARE_GLOBAL_DATA_PTR;
/* QSPI max chipselect signals number */
#define FSL_QSPI_MAX_CHIPSELECT_NUM 4
#ifdef CONFIG_DM_SPI
/**
* struct fsl_qspi_platdata - platform data for Freescale QSPI
*
@ -105,7 +104,6 @@ struct fsl_qspi_platdata {
u32 flash_num;
u32 num_chipselect;
};
#endif
/**
* struct fsl_qspi_priv - private data for Freescale QSPI
@ -136,12 +134,6 @@ struct fsl_qspi_priv {
struct fsl_qspi_regs *regs;
};
#ifndef CONFIG_DM_SPI
struct fsl_qspi {
struct spi_slave slave;
struct fsl_qspi_priv priv;
};
#endif
static u32 qspi_read32(u32 flags, u32 *addr)
{
@ -869,136 +861,7 @@ void qspi_cfg_smpr(struct fsl_qspi_priv *priv, u32 clear_bits, u32 set_bits)
smpr_val |= set_bits;
qspi_write32(priv->flags, &priv->regs->smpr, smpr_val);
}
#ifndef CONFIG_DM_SPI
static unsigned long spi_bases[] = {
QSPI0_BASE_ADDR,
#ifdef CONFIG_MX6SX
QSPI1_BASE_ADDR,
#endif
};
static unsigned long amba_bases[] = {
QSPI0_AMBA_BASE,
#ifdef CONFIG_MX6SX
QSPI1_AMBA_BASE,
#endif
};
static inline struct fsl_qspi *to_qspi_spi(struct spi_slave *slave)
{
return container_of(slave, struct fsl_qspi, slave);
}
struct spi_slave *spi_setup_slave(unsigned int bus, unsigned int cs,
unsigned int max_hz, unsigned int mode)
{
u32 mcr_val;
struct fsl_qspi *qspi;
struct fsl_qspi_regs *regs;
u32 total_size;
if (bus >= ARRAY_SIZE(spi_bases))
return NULL;
if (cs >= FSL_QSPI_FLASH_NUM)
return NULL;
qspi = spi_alloc_slave(struct fsl_qspi, bus, cs);
if (!qspi)
return NULL;
#ifdef CONFIG_SYS_FSL_QSPI_BE
qspi->priv.flags |= QSPI_FLAG_REGMAP_ENDIAN_BIG;
#endif
regs = (struct fsl_qspi_regs *)spi_bases[bus];
qspi->priv.regs = regs;
/*
* According to cs, use a different amba_base to choose the
* corresponding flash devices.
*
* If not, only one flash device is used even if passing
* different cs using `sf probe`
*/
qspi->priv.cur_amba_base = amba_bases[bus] + cs * FSL_QSPI_FLASH_SIZE;
qspi->slave.max_write_size = TX_BUFFER_SIZE;
mcr_val = qspi_read32(qspi->priv.flags, &regs->mcr);
/* Set endianness to LE for i.mx */
if (IS_ENABLED(CONFIG_MX6) || IS_ENABLED(CONFIG_MX7))
mcr_val = QSPI_MCR_END_CFD_LE;
qspi_write32(qspi->priv.flags, &regs->mcr,
QSPI_MCR_RESERVED_MASK | QSPI_MCR_MDIS_MASK |
(mcr_val & QSPI_MCR_END_CFD_MASK));
qspi_cfg_smpr(&qspi->priv,
~(QSPI_SMPR_FSDLY_MASK | QSPI_SMPR_DDRSMP_MASK |
QSPI_SMPR_FSPHS_MASK | QSPI_SMPR_HSENA_MASK), 0);
total_size = FSL_QSPI_FLASH_SIZE * FSL_QSPI_FLASH_NUM;
/*
* Any read access to non-implemented addresses will provide
* undefined results.
*
* In case of single-die flash devices, TOP_ADDR_MEMA2 and
* TOP_ADDR_MEMB2 should be initialized/programmed to
* TOP_ADDR_MEMA1 and TOP_ADDR_MEMB1 respectively - in effect,
* setting the size of these devices to 0. This would ensure
* that the complete memory map is assigned to only one flash device.
*/
qspi_write32(qspi->priv.flags, &regs->sfa1ad,
FSL_QSPI_FLASH_SIZE | amba_bases[bus]);
qspi_write32(qspi->priv.flags, &regs->sfa2ad,
FSL_QSPI_FLASH_SIZE | amba_bases[bus]);
qspi_write32(qspi->priv.flags, &regs->sfb1ad,
total_size | amba_bases[bus]);
qspi_write32(qspi->priv.flags, &regs->sfb2ad,
total_size | amba_bases[bus]);
qspi_set_lut(&qspi->priv);
#ifdef CONFIG_SYS_FSL_QSPI_AHB
qspi_init_ahb_read(&qspi->priv);
#endif
qspi_module_disable(&qspi->priv, 0);
return &qspi->slave;
}
void spi_free_slave(struct spi_slave *slave)
{
struct fsl_qspi *qspi = to_qspi_spi(slave);
free(qspi);
}
int spi_claim_bus(struct spi_slave *slave)
{
return 0;
}
void spi_release_bus(struct spi_slave *slave)
{
/* Nothing to do */
}
int spi_xfer(struct spi_slave *slave, unsigned int bitlen,
const void *dout, void *din, unsigned long flags)
{
struct fsl_qspi *qspi = to_qspi_spi(slave);
return qspi_xfer(&qspi->priv, bitlen, dout, din, flags);
}
void spi_init(void)
{
/* Nothing to do */
}
#else
static int fsl_qspi_child_pre_probe(struct udevice *dev)
{
struct spi_slave *slave = dev_get_parent_priv(dev);
@ -1265,4 +1128,3 @@ U_BOOT_DRIVER(fsl_qspi) = {
.probe = fsl_qspi_probe,
.child_pre_probe = fsl_qspi_child_pre_probe,
};
#endif

View file

@ -67,15 +67,12 @@ struct sh_qspi_regs {
};
struct sh_qspi_slave {
#ifndef CONFIG_DM_SPI
struct spi_slave slave;
#endif
struct sh_qspi_regs *regs;
};
static inline struct sh_qspi_slave *to_sh_qspi(struct spi_slave *slave)
{
return container_of(slave, struct sh_qspi_slave, slave);
}
static void sh_qspi_init(struct sh_qspi_slave *ss)
{
/* QSPI initialize */
@ -119,15 +116,8 @@ static void sh_qspi_init(struct sh_qspi_slave *ss)
setbits_8(&ss->regs->spcr, SPCR_SPE);
}
int spi_cs_is_valid(unsigned int bus, unsigned int cs)
static void sh_qspi_cs_activate(struct sh_qspi_slave *ss)
{
return 1;
}
void spi_cs_activate(struct spi_slave *slave)
{
struct sh_qspi_slave *ss = to_sh_qspi(slave);
/* Set master mode only */
writeb(SPCR_MSTR, &ss->regs->spcr);
@ -147,61 +137,15 @@ void spi_cs_activate(struct spi_slave *slave)
setbits_8(&ss->regs->spcr, SPCR_SPE);
}
void spi_cs_deactivate(struct spi_slave *slave)
static void sh_qspi_cs_deactivate(struct sh_qspi_slave *ss)
{
struct sh_qspi_slave *ss = to_sh_qspi(slave);
/* Disable SPI Function */
clrbits_8(&ss->regs->spcr, SPCR_SPE);
}
void spi_init(void)
static int sh_qspi_xfer_common(struct sh_qspi_slave *ss, unsigned int bitlen,
const void *dout, void *din, unsigned long flags)
{
/* nothing to do */
}
struct spi_slave *spi_setup_slave(unsigned int bus, unsigned int cs,
unsigned int max_hz, unsigned int mode)
{
struct sh_qspi_slave *ss;
if (!spi_cs_is_valid(bus, cs))
return NULL;
ss = spi_alloc_slave(struct sh_qspi_slave, bus, cs);
if (!ss) {
printf("SPI_error: Fail to allocate sh_qspi_slave\n");
return NULL;
}
ss->regs = (struct sh_qspi_regs *)SH_QSPI_BASE;
/* Init SH QSPI */
sh_qspi_init(ss);
return &ss->slave;
}
void spi_free_slave(struct spi_slave *slave)
{
struct sh_qspi_slave *spi = to_sh_qspi(slave);
free(spi);
}
int spi_claim_bus(struct spi_slave *slave)
{
return 0;
}
void spi_release_bus(struct spi_slave *slave)
{
}
int spi_xfer(struct spi_slave *slave, unsigned int bitlen, const void *dout,
void *din, unsigned long flags)
{
struct sh_qspi_slave *ss = to_sh_qspi(slave);
u32 nbyte, chunk;
int i, ret = 0;
u8 dtdata = 0, drdata;
@ -210,7 +154,7 @@ int spi_xfer(struct spi_slave *slave, unsigned int bitlen, const void *dout,
if (dout == NULL && din == NULL) {
if (flags & SPI_XFER_END)
spi_cs_deactivate(slave);
sh_qspi_cs_deactivate(ss);
return 0;
}
@ -222,7 +166,7 @@ int spi_xfer(struct spi_slave *slave, unsigned int bitlen, const void *dout,
nbyte = bitlen / 8;
if (flags & SPI_XFER_BEGIN) {
spi_cs_activate(slave);
sh_qspi_cs_activate(ss);
/* Set 1048576 byte */
writel(0x100000, spbmul0);
@ -273,7 +217,148 @@ int spi_xfer(struct spi_slave *slave, unsigned int bitlen, const void *dout,
}
if (flags & SPI_XFER_END)
spi_cs_deactivate(slave);
sh_qspi_cs_deactivate(ss);
return ret;
}
#ifndef CONFIG_DM_SPI
static inline struct sh_qspi_slave *to_sh_qspi(struct spi_slave *slave)
{
return container_of(slave, struct sh_qspi_slave, slave);
}
int spi_cs_is_valid(unsigned int bus, unsigned int cs)
{
return 1;
}
void spi_cs_activate(struct spi_slave *slave)
{
struct sh_qspi_slave *ss = to_sh_qspi(slave);
sh_qspi_cs_activate(ss);
}
void spi_cs_deactivate(struct spi_slave *slave)
{
struct sh_qspi_slave *ss = to_sh_qspi(slave);
sh_qspi_cs_deactivate(ss);
}
void spi_init(void)
{
/* nothing to do */
}
struct spi_slave *spi_setup_slave(unsigned int bus, unsigned int cs,
unsigned int max_hz, unsigned int mode)
{
struct sh_qspi_slave *ss;
if (!spi_cs_is_valid(bus, cs))
return NULL;
ss = spi_alloc_slave(struct sh_qspi_slave, bus, cs);
if (!ss) {
printf("SPI_error: Fail to allocate sh_qspi_slave\n");
return NULL;
}
ss->regs = (struct sh_qspi_regs *)SH_QSPI_BASE;
/* Init SH QSPI */
sh_qspi_init(ss);
return &ss->slave;
}
void spi_free_slave(struct spi_slave *slave)
{
struct sh_qspi_slave *spi = to_sh_qspi(slave);
free(spi);
}
int spi_claim_bus(struct spi_slave *slave)
{
return 0;
}
void spi_release_bus(struct spi_slave *slave)
{
}
int spi_xfer(struct spi_slave *slave, unsigned int bitlen,
const void *dout, void *din, unsigned long flags)
{
struct sh_qspi_slave *ss = to_sh_qspi(slave);
return sh_qspi_xfer_common(ss, bitlen, dout, din, flags);
}
#else
#include <dm.h>
static int sh_qspi_xfer(struct udevice *dev, unsigned int bitlen,
const void *dout, void *din, unsigned long flags)
{
struct udevice *bus = dev->parent;
struct sh_qspi_slave *ss = dev_get_platdata(bus);
return sh_qspi_xfer_common(ss, bitlen, dout, din, flags);
}
static int sh_qspi_set_speed(struct udevice *dev, uint speed)
{
/* This is a SPI NOR controller, do nothing. */
return 0;
}
static int sh_qspi_set_mode(struct udevice *dev, uint mode)
{
/* This is a SPI NOR controller, do nothing. */
return 0;
}
static int sh_qspi_probe(struct udevice *dev)
{
struct sh_qspi_slave *ss = dev_get_platdata(dev);
sh_qspi_init(ss);
return 0;
}
static int sh_qspi_ofdata_to_platdata(struct udevice *dev)
{
struct sh_qspi_slave *plat = dev_get_platdata(dev);
plat->regs = (struct sh_qspi_regs *)dev_read_addr(dev);
return 0;
}
static const struct dm_spi_ops sh_qspi_ops = {
.xfer = sh_qspi_xfer,
.set_speed = sh_qspi_set_speed,
.set_mode = sh_qspi_set_mode,
};
static const struct udevice_id sh_qspi_ids[] = {
{ .compatible = "renesas,qspi" },
{ }
};
U_BOOT_DRIVER(sh_qspi) = {
.name = "sh_qspi",
.id = UCLASS_SPI,
.of_match = sh_qspi_ids,
.ops = &sh_qspi_ops,
.ofdata_to_platdata = sh_qspi_ofdata_to_platdata,
.platdata_auto_alloc_size = sizeof(struct sh_qspi_slave),
.probe = sh_qspi_probe,
};
#endif

501
drivers/spi/spi-mem.c Normal file
View file

@ -0,0 +1,501 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 Exceet Electronics GmbH
* Copyright (C) 2018 Bootlin
*
* Author: Boris Brezillon <boris.brezillon@bootlin.com>
*/
#ifndef __UBOOT__
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include "internals.h"
#else
#include <spi.h>
#include <spi-mem.h>
#endif
#ifndef __UBOOT__
/**
* spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
* memory operation
* @ctlr: the SPI controller requesting this dma_map()
* @op: the memory operation containing the buffer to map
* @sgt: a pointer to a non-initialized sg_table that will be filled by this
* function
*
* Some controllers might want to do DMA on the data buffer embedded in @op.
* This helper prepares everything for you and provides a ready-to-use
* sg_table. This function is not intended to be called from spi drivers.
* Only SPI controller drivers should use it.
* Note that the caller must ensure the memory region pointed by
* op->data.buf.{in,out} is DMA-able before calling this function.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
const struct spi_mem_op *op,
struct sg_table *sgt)
{
struct device *dmadev;
if (!op->data.nbytes)
return -EINVAL;
if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
dmadev = ctlr->dma_tx->device->dev;
else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
dmadev = ctlr->dma_rx->device->dev;
else
dmadev = ctlr->dev.parent;
if (!dmadev)
return -EINVAL;
return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
op->data.dir == SPI_MEM_DATA_IN ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);
/**
* spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
* memory operation
* @ctlr: the SPI controller requesting this dma_unmap()
* @op: the memory operation containing the buffer to unmap
* @sgt: a pointer to an sg_table previously initialized by
* spi_controller_dma_map_mem_op_data()
*
* Some controllers might want to do DMA on the data buffer embedded in @op.
* This helper prepares things so that the CPU can access the
* op->data.buf.{in,out} buffer again.
*
* This function is not intended to be called from SPI drivers. Only SPI
* controller drivers should use it.
*
* This function should be called after the DMA operation has finished and is
* only valid if the previous spi_controller_dma_map_mem_op_data() call
* returned 0.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
const struct spi_mem_op *op,
struct sg_table *sgt)
{
struct device *dmadev;
if (!op->data.nbytes)
return;
if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
dmadev = ctlr->dma_tx->device->dev;
else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
dmadev = ctlr->dma_rx->device->dev;
else
dmadev = ctlr->dev.parent;
spi_unmap_buf(ctlr, dmadev, sgt,
op->data.dir == SPI_MEM_DATA_IN ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
#endif /* __UBOOT__ */
static int spi_check_buswidth_req(struct spi_slave *slave, u8 buswidth, bool tx)
{
u32 mode = slave->mode;
switch (buswidth) {
case 1:
return 0;
case 2:
if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
(!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
return 0;
break;
case 4:
if ((tx && (mode & SPI_TX_QUAD)) ||
(!tx && (mode & SPI_RX_QUAD)))
return 0;
break;
default:
break;
}
return -ENOTSUPP;
}
bool spi_mem_default_supports_op(struct spi_slave *slave,
const struct spi_mem_op *op)
{
if (spi_check_buswidth_req(slave, op->cmd.buswidth, true))
return false;
if (op->addr.nbytes &&
spi_check_buswidth_req(slave, op->addr.buswidth, true))
return false;
if (op->dummy.nbytes &&
spi_check_buswidth_req(slave, op->dummy.buswidth, true))
return false;
if (op->data.nbytes &&
spi_check_buswidth_req(slave, op->data.buswidth,
op->data.dir == SPI_MEM_DATA_OUT))
return false;
return true;
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
/**
* spi_mem_supports_op() - Check if a memory device and the controller it is
* connected to support a specific memory operation
* @slave: the SPI device
* @op: the memory operation to check
*
* Some controllers only support Single or Dual IOs, others might only
* support specific opcodes, or it can even be that the controller and device
* both support Quad IOs but the hardware prevents you from using it because
* only 2 IO lines are connected.
*
* This function checks whether a specific operation is supported.
*
* Return: true if @op is supported, false otherwise.
*/
bool spi_mem_supports_op(struct spi_slave *slave,
const struct spi_mem_op *op)
{
struct udevice *bus = slave->dev->parent;
struct dm_spi_ops *ops = spi_get_ops(bus);
if (ops->mem_ops && ops->mem_ops->supports_op)
return ops->mem_ops->supports_op(slave, op);
return spi_mem_default_supports_op(slave, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
/**
* spi_mem_exec_op() - Execute a memory operation
* @slave: the SPI device
* @op: the memory operation to execute
*
* Executes a memory operation.
*
* This function first checks that @op is supported and then tries to execute
* it.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int spi_mem_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
{
struct udevice *bus = slave->dev->parent;
struct dm_spi_ops *ops = spi_get_ops(bus);
unsigned int pos = 0;
const u8 *tx_buf = NULL;
u8 *rx_buf = NULL;
u8 *op_buf;
int op_len;
u32 flag;
int ret;
int i;
if (!spi_mem_supports_op(slave, op))
return -ENOTSUPP;
if (ops->mem_ops) {
#ifndef __UBOOT__
/*
* Flush the message queue before executing our SPI memory
* operation to prevent preemption of regular SPI transfers.
*/
spi_flush_queue(ctlr);
if (ctlr->auto_runtime_pm) {
ret = pm_runtime_get_sync(ctlr->dev.parent);
if (ret < 0) {
dev_err(&ctlr->dev,
"Failed to power device: %d\n",
ret);
return ret;
}
}
mutex_lock(&ctlr->bus_lock_mutex);
mutex_lock(&ctlr->io_mutex);
#endif
ret = ops->mem_ops->exec_op(slave, op);
#ifndef __UBOOT__
mutex_unlock(&ctlr->io_mutex);
mutex_unlock(&ctlr->bus_lock_mutex);
if (ctlr->auto_runtime_pm)
pm_runtime_put(ctlr->dev.parent);
#endif
/*
* Some controllers only optimize specific paths (typically the
* read path) and expect the core to use the regular SPI
* interface in other cases.
*/
if (!ret || ret != -ENOTSUPP)
return ret;
}
#ifndef __UBOOT__
tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
op->dummy.nbytes;
/*
* Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
* we're guaranteed that this buffer is DMA-able, as required by the
* SPI layer.
*/
tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
if (!tmpbuf)
return -ENOMEM;
spi_message_init(&msg);
tmpbuf[0] = op->cmd.opcode;
xfers[xferpos].tx_buf = tmpbuf;
xfers[xferpos].len = sizeof(op->cmd.opcode);
xfers[xferpos].tx_nbits = op->cmd.buswidth;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen++;
if (op->addr.nbytes) {
int i;
for (i = 0; i < op->addr.nbytes; i++)
tmpbuf[i + 1] = op->addr.val >>
(8 * (op->addr.nbytes - i - 1));
xfers[xferpos].tx_buf = tmpbuf + 1;
xfers[xferpos].len = op->addr.nbytes;
xfers[xferpos].tx_nbits = op->addr.buswidth;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->addr.nbytes;
}
if (op->dummy.nbytes) {
memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
xfers[xferpos].len = op->dummy.nbytes;
xfers[xferpos].tx_nbits = op->dummy.buswidth;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->dummy.nbytes;
}
if (op->data.nbytes) {
if (op->data.dir == SPI_MEM_DATA_IN) {
xfers[xferpos].rx_buf = op->data.buf.in;
xfers[xferpos].rx_nbits = op->data.buswidth;
} else {
xfers[xferpos].tx_buf = op->data.buf.out;
xfers[xferpos].tx_nbits = op->data.buswidth;
}
xfers[xferpos].len = op->data.nbytes;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->data.nbytes;
}
ret = spi_sync(slave, &msg);
kfree(tmpbuf);
if (ret)
return ret;
if (msg.actual_length != totalxferlen)
return -EIO;
#else
/* U-Boot does not support parallel SPI data lanes */
if ((op->cmd.buswidth != 1) ||
(op->addr.nbytes && op->addr.buswidth != 1) ||
(op->dummy.nbytes && op->dummy.buswidth != 1) ||
(op->data.nbytes && op->data.buswidth != 1)) {
printf("Dual/Quad raw SPI transfers not supported\n");
return -ENOTSUPP;
}
if (op->data.nbytes) {
if (op->data.dir == SPI_MEM_DATA_IN)
rx_buf = op->data.buf.in;
else
tx_buf = op->data.buf.out;
}
op_len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
op_buf = calloc(1, op_len);
ret = spi_claim_bus(slave);
if (ret < 0)
return ret;
op_buf[pos++] = op->cmd.opcode;
if (op->addr.nbytes) {
for (i = 0; i < op->addr.nbytes; i++)
op_buf[pos + i] = op->addr.val >>
(8 * (op->addr.nbytes - i - 1));
pos += op->addr.nbytes;
}
if (op->dummy.nbytes)
memset(op_buf + pos, 0xff, op->dummy.nbytes);
/* 1st transfer: opcode + address + dummy cycles */
flag = SPI_XFER_BEGIN;
/* Make sure to set END bit if no tx or rx data messages follow */
if (!tx_buf && !rx_buf)
flag |= SPI_XFER_END;
ret = spi_xfer(slave, op_len * 8, op_buf, NULL, flag);
if (ret)
return ret;
/* 2nd transfer: rx or tx data path */
if (tx_buf || rx_buf) {
ret = spi_xfer(slave, op->data.nbytes * 8, tx_buf,
rx_buf, SPI_XFER_END);
if (ret)
return ret;
}
spi_release_bus(slave);
for (i = 0; i < pos; i++)
debug("%02x ", op_buf[i]);
debug("| [%dB %s] ",
tx_buf || rx_buf ? op->data.nbytes : 0,
tx_buf || rx_buf ? (tx_buf ? "out" : "in") : "-");
for (i = 0; i < op->data.nbytes; i++)
debug("%02x ", tx_buf ? tx_buf[i] : rx_buf[i]);
debug("[ret %d]\n", ret);
free(op_buf);
if (ret < 0)
return ret;
#endif /* __UBOOT__ */
return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
/**
* spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
* match controller limitations
* @slave: the SPI device
* @op: the operation to adjust
*
* Some controllers have FIFO limitations and must split a data transfer
* operation into multiple ones, others require a specific alignment for
* optimized accesses. This function allows SPI mem drivers to split a single
* operation into multiple sub-operations when required.
*
* Return: a negative error code if the controller can't properly adjust @op,
* 0 otherwise. Note that @op->data.nbytes will be updated if @op
* can't be handled in a single step.
*/
int spi_mem_adjust_op_size(struct spi_slave *slave, struct spi_mem_op *op)
{
struct udevice *bus = slave->dev->parent;
struct dm_spi_ops *ops = spi_get_ops(bus);
if (ops->mem_ops && ops->mem_ops->adjust_op_size)
return ops->mem_ops->adjust_op_size(slave, op);
return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
#ifndef __UBOOT__
static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
return container_of(drv, struct spi_mem_driver, spidrv.driver);
}
static int spi_mem_probe(struct spi_device *spi)
{
struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
struct spi_mem *mem;
mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
if (!mem)
return -ENOMEM;
mem->spi = spi;
spi_set_drvdata(spi, mem);
return memdrv->probe(mem);
}
static int spi_mem_remove(struct spi_device *spi)
{
struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
struct spi_mem *mem = spi_get_drvdata(spi);
if (memdrv->remove)
return memdrv->remove(mem);
return 0;
}
static void spi_mem_shutdown(struct spi_device *spi)
{
struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
struct spi_mem *mem = spi_get_drvdata(spi);
if (memdrv->shutdown)
memdrv->shutdown(mem);
}
/**
* spi_mem_driver_register_with_owner() - Register a SPI memory driver
* @memdrv: the SPI memory driver to register
* @owner: the owner of this driver
*
* Registers a SPI memory driver.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
struct module *owner)
{
memdrv->spidrv.probe = spi_mem_probe;
memdrv->spidrv.remove = spi_mem_remove;
memdrv->spidrv.shutdown = spi_mem_shutdown;
return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
/**
* spi_mem_driver_unregister() - Unregister a SPI memory driver
* @memdrv: the SPI memory driver to unregister
*
* Unregisters a SPI memory driver.
*/
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
#endif /* __UBOOT__ */
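
As a usage sketch (editorial illustration, not part of this diff), here is how a client of the new API can perform a NOR-style 0x03 read, letting spi_mem_adjust_op_size() clamp op.data.nbytes to the controller's limits and looping until the whole buffer has been transferred; only struct fields and functions exercised in the file above are used.

#include <common.h>
#include <spi.h>
#include <spi-mem.h>

static int spi_mem_read_sketch(struct spi_slave *slave, u32 from,
			       u8 *buf, unsigned int len)
{
	struct spi_mem_op op = { };
	int ret;

	op.cmd.opcode = 0x03;		/* slow read, single IO line */
	op.cmd.buswidth = 1;
	op.addr.nbytes = 3;		/* 24-bit address */
	op.addr.buswidth = 1;
	op.data.dir = SPI_MEM_DATA_IN;
	op.data.buswidth = 1;

	while (len) {
		op.addr.val = from;
		op.data.buf.in = buf;
		op.data.nbytes = len;

		/* Let the controller shrink the transfer if needed. */
		ret = spi_mem_adjust_op_size(slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(slave, &op);
		if (ret)
			return ret;

		from += op.data.nbytes;
		buf += op.data.nbytes;
		len -= op.data.nbytes;
	}

	return 0;
}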

View file

@ -240,7 +240,7 @@
/* LB refresh timer prescal, 266MHz/32 */
#define CONFIG_SYS_LBC_MRTPR 0x20000000 /*TODO */
/* drivers/mtd/nand/nand.c */
/* drivers/mtd/nand/raw/nand.c */
#if defined(CONFIG_NAND) && defined(CONFIG_SPL_BUILD)
#define CONFIG_SYS_NAND_BASE 0xFFF00000
#else

View file

@ -15,9 +15,12 @@
#define MTD_DEV_TYPE_NOR 0x0001
#define MTD_DEV_TYPE_NAND 0x0002
#define MTD_DEV_TYPE_ONENAND 0x0004
#define MTD_DEV_TYPE_SPINAND 0x0008
#define MTD_DEV_TYPE(type) ((type == MTD_DEV_TYPE_NAND) ? "nand" : \
(type == MTD_DEV_TYPE_ONENAND) ? "onenand" : "nor")
#define MTD_DEV_TYPE(type) (type == MTD_DEV_TYPE_NAND ? "nand" : \
(type == MTD_DEV_TYPE_NOR ? "nor" : \
(type == MTD_DEV_TYPE_ONENAND ? "onenand" : \
"spi-nand"))) \
struct mtd_device {
struct list_head link;

View file

@ -20,7 +20,11 @@
#include <linux/compat.h>
#include <mtd/mtd-abi.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <div64.h>
#if IS_ENABLED(CONFIG_DM)
#include <dm/device.h>
#endif
#define MAX_MTD_DEVICES 32
#endif
@ -304,8 +308,64 @@ struct mtd_info {
struct udevice *dev;
#endif
int usecount;
/* MTD devices do not have any parent. MTD partitions do. */
struct mtd_info *parent;
/*
* Offset of the partition relatively to the parent offset.
* Is 0 for real MTD devices (ie. not partitions).
*/
u64 offset;
/*
* List node used to add an MTD partition to the parent
* partition list.
*/
struct list_head node;
/*
* List of partitions attached to this MTD device (the parent
* MTD device can itself be a partition).
*/
struct list_head partitions;
};
#if IS_ENABLED(CONFIG_DM)
static inline void mtd_set_of_node(struct mtd_info *mtd,
const struct device_node *np)
{
mtd->dev->node.np = np;
}
static inline const struct device_node *mtd_get_of_node(struct mtd_info *mtd)
{
return mtd->dev->node.np;
}
#else
struct device_node;
static inline void mtd_set_of_node(struct mtd_info *mtd,
const struct device_node *np)
{
}
static inline const struct device_node *mtd_get_of_node(struct mtd_info *mtd)
{
return NULL;
}
#endif
static inline bool mtd_is_partition(const struct mtd_info *mtd)
{
return mtd->parent;
}
static inline bool mtd_has_partitions(const struct mtd_info *mtd)
{
return !list_empty(&mtd->partitions);
}
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobecc);
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
@ -351,17 +411,7 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
const u_char *buf);
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops);
static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
ops->retlen = ops->oobretlen = 0;
if (!mtd->_write_oob)
return -EOPNOTSUPP;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
return mtd->_write_oob(mtd, to, ops);
}
int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops);
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
struct otp_info *buf);
@ -515,6 +565,12 @@ int del_mtd_device(struct mtd_info *mtd);
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
struct mtd_info *__mtd_next_device(int i);
#define mtd_for_each_device(mtd) \
for ((mtd) = __mtd_next_device(0); \
(mtd) != NULL; \
(mtd) = __mtd_next_device(mtd->index + 1))
int mtd_arg_off(const char *arg, int *idx, loff_t *off, loff_t *size,
loff_t *maxsize, int devtype, uint64_t chipsize);
int mtd_arg_off_size(int argc, char *const argv[], int *idx, loff_t *off,
@ -525,5 +581,10 @@ int mtd_arg_off_size(int argc, char *const argv[], int *idx, loff_t *off,
void mtd_get_len_incl_bad(struct mtd_info *mtd, uint64_t offset,
const uint64_t length, uint64_t *len_incl_bad,
int *truncated);
/* drivers/mtd/mtd_uboot.c */
int mtd_search_alternate_name(const char *mtdname, char *altname,
unsigned int max_len);
#endif
#endif /* __MTD_MTD_H__ */
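
A short usage sketch for the new iterator and partition helpers (hypothetical caller, not part of this diff): list every registered MTD device and tag the ones that are partitions.

#include <common.h>
#include <linux/mtd/mtd.h>

static void mtd_list_sketch(void)
{
	struct mtd_info *mtd;

	/* mtd_for_each_device() walks indexes via __mtd_next_device() */
	mtd_for_each_device(mtd)
		printf("mtd%d: %-20s %s\n", mtd->index, mtd->name,
		       mtd_is_partition(mtd) ? "(partition)" : "");
}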

734
include/linux/mtd/nand.h Normal file
View file

@ -0,0 +1,734 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2017 - Free Electrons
*
* Authors:
* Boris Brezillon <boris.brezillon@free-electrons.com>
* Peter Pan <peterpandong@micron.com>
*/
#ifndef __LINUX_MTD_NAND_H
#define __LINUX_MTD_NAND_H
#include <linux/mtd/mtd.h>
/**
* struct nand_memory_organization - Memory organization structure
* @bits_per_cell: number of bits per NAND cell
* @pagesize: page size
* @oobsize: OOB area size
* @pages_per_eraseblock: number of pages per eraseblock
* @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
* @planes_per_lun: number of planes per LUN
* @luns_per_target: number of LUN per target (target is a synonym for die)
* @ntargets: total number of targets exposed by the NAND device
*/
struct nand_memory_organization {
unsigned int bits_per_cell;
unsigned int pagesize;
unsigned int oobsize;
unsigned int pages_per_eraseblock;
unsigned int eraseblocks_per_lun;
unsigned int planes_per_lun;
unsigned int luns_per_target;
unsigned int ntargets;
};
#define NAND_MEMORG(bpc, ps, os, ppe, epl, ppl, lpt, nt) \
{ \
.bits_per_cell = (bpc), \
.pagesize = (ps), \
.oobsize = (os), \
.pages_per_eraseblock = (ppe), \
.eraseblocks_per_lun = (epl), \
.planes_per_lun = (ppl), \
.luns_per_target = (lpt), \
.ntargets = (nt), \
}
/**
* struct nand_row_converter - Information needed to convert an absolute offset
* into a row address
* @lun_addr_shift: position of the LUN identifier in the row address
* @eraseblock_addr_shift: position of the eraseblock identifier in the row
* address
*/
struct nand_row_converter {
unsigned int lun_addr_shift;
unsigned int eraseblock_addr_shift;
};
/**
* struct nand_pos - NAND position object
* @target: the NAND target/die
* @lun: the LUN identifier
* @plane: the plane within the LUN
* @eraseblock: the eraseblock within the LUN
* @page: the page within the LUN
*
* This information is usually used by specific sub-layers to select the
* appropriate target/die and generate a row address to pass to the device.
*/
struct nand_pos {
unsigned int target;
unsigned int lun;
unsigned int plane;
unsigned int eraseblock;
unsigned int page;
};
/**
* struct nand_page_io_req - NAND I/O request object
* @pos: the position this I/O request is targeting
* @dataoffs: the offset within the page
* @datalen: number of data bytes to read from/write to this page
* @databuf: buffer to store data in or get data from
* @ooboffs: the OOB offset within the page
* @ooblen: the number of OOB bytes to read from/write to this page
* @oobbuf: buffer to store OOB data in or get OOB data from
* @mode: one of the %MTD_OPS_XXX mode
*
* This object is used to pass per-page I/O requests to NAND sub-layers. This
* way all useful information is already formatted in a useful way and
* specific NAND layers can focus on translating this information into
* specific commands/operations.
*/
struct nand_page_io_req {
struct nand_pos pos;
unsigned int dataoffs;
unsigned int datalen;
union {
const void *out;
void *in;
} databuf;
unsigned int ooboffs;
unsigned int ooblen;
union {
const void *out;
void *in;
} oobbuf;
int mode;
};
/**
* struct nand_ecc_req - NAND ECC requirements
* @strength: ECC strength
* @step_size: ECC step/block size
*/
struct nand_ecc_req {
unsigned int strength;
unsigned int step_size;
};
#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
/**
* struct nand_bbt - bad block table object
* @cache: in memory BBT cache
*/
struct nand_bbt {
unsigned long *cache;
};
struct nand_device;
/**
* struct nand_ops - NAND operations
* @erase: erase a specific block. No need to check if the block is bad before
* erasing, this has been taken care of by the generic NAND layer
* @markbad: mark a specific block bad. No need to check if the block is
* already marked bad, this has been taken care of by the generic
* NAND layer. This method should just write the BBM (Bad Block
* Marker) so that future calls to struct_nand_ops->isbad() return
* true
* @isbad: check whether a block is bad or not. This method should just read
* the BBM and return whether the block is bad or not based on what it
* reads
*
* These are all low level operations that should be implemented by specialized
* NAND layers (SPI NAND, raw NAND, ...).
*/
struct nand_ops {
int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
};
/**
* struct nand_device - NAND device
* @mtd: MTD instance attached to the NAND device
* @memorg: memory layout
* @eccreq: ECC requirements
* @rowconv: position to row address converter
* @bbt: bad block table info
* @ops: NAND operations attached to the NAND device
*
* Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
* should declare their own NAND object embedding a nand_device struct (that's
* how inheritance is done).
* struct_nand_device->memorg and struct_nand_device->eccreq should be filled
* at device detection time to reflect the NAND device
* capabilities/requirements. Once this is done nanddev_init() can be called.
* It will take care of converting NAND information into MTD ones, which means
* the specialized NAND layers should never manually tweak
* struct_nand_device->mtd except for the ->_read/write() hooks.
*/
struct nand_device {
struct mtd_info *mtd;
struct nand_memory_organization memorg;
struct nand_ecc_req eccreq;
struct nand_row_converter rowconv;
struct nand_bbt bbt;
const struct nand_ops *ops;
};
/**
* struct nand_io_iter - NAND I/O iterator
* @req: current I/O request
* @oobbytes_per_page: maximum number of OOB bytes per page
* @dataleft: remaining number of data bytes to read/write
* @oobleft: remaining number of OOB bytes to read/write
*
* Can be used by specialized NAND layers to iterate over all pages covered
* by an MTD I/O request, which should greatly simplify the boiler-plate
* code needed to read/write data from/to a NAND device.
*/
struct nand_io_iter {
struct nand_page_io_req req;
unsigned int oobbytes_per_page;
unsigned int dataleft;
unsigned int oobleft;
};
/**
* mtd_to_nanddev() - Get the NAND device attached to the MTD instance
* @mtd: MTD instance
*
* Return: the NAND device embedding @mtd.
*/
static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
{
return mtd->priv;
}
/**
* nanddev_to_mtd() - Get the MTD device attached to a NAND device
* @nand: NAND device
*
* Return: the MTD device embedded in @nand.
*/
static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
{
return nand->mtd;
}
/*
* nanddev_bits_per_cell() - Get the number of bits per cell
* @nand: NAND device
*
* Return: the number of bits per cell.
*/
static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
{
return nand->memorg.bits_per_cell;
}
/**
* nanddev_page_size() - Get NAND page size
* @nand: NAND device
*
* Return: the page size.
*/
static inline size_t nanddev_page_size(const struct nand_device *nand)
{
return nand->memorg.pagesize;
}
/**
* nanddev_per_page_oobsize() - Get NAND OOB size
* @nand: NAND device
*
* Return: the OOB size.
*/
static inline unsigned int
nanddev_per_page_oobsize(const struct nand_device *nand)
{
return nand->memorg.oobsize;
}
/**
* nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
* @nand: NAND device
*
* Return: the number of pages per eraseblock.
*/
static inline unsigned int
nanddev_pages_per_eraseblock(const struct nand_device *nand)
{
return nand->memorg.pages_per_eraseblock;
}
/**
* nanddev_eraseblock_size() - Get NAND eraseblock size
* @nand: NAND device
*
* Return: the eraseblock size.
*/
static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
{
return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
}
/**
* nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
* @nand: NAND device
*
* Return: the number of eraseblocks per LUN.
*/
static inline unsigned int
nanddev_eraseblocks_per_lun(const struct nand_device *nand)
{
return nand->memorg.eraseblocks_per_lun;
}
/**
* nanddev_target_size() - Get the total size provided by a single target/die
* @nand: NAND device
*
* Return: the total size exposed by a single target/die in bytes.
*/
static inline u64 nanddev_target_size(const struct nand_device *nand)
{
return (u64)nand->memorg.luns_per_target *
nand->memorg.eraseblocks_per_lun *
nand->memorg.pages_per_eraseblock *
nand->memorg.pagesize;
}
/**
* nanddev_ntargets() - Get the total number of targets
* @nand: NAND device
*
* Return: the number of targets/dies exposed by @nand.
*/
static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
{
return nand->memorg.ntargets;
}
/**
* nanddev_neraseblocks() - Get the total number of eraseblocks
* @nand: NAND device
*
* Return: the total number of eraseblocks exposed by @nand.
*/
static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
{
return (u64)nand->memorg.luns_per_target *
nand->memorg.eraseblocks_per_lun *
nand->memorg.pages_per_eraseblock;
}
/**
* nanddev_size() - Get NAND size
* @nand: NAND device
*
* Return: the total size (in bytes) exposed by @nand.
*/
static inline u64 nanddev_size(const struct nand_device *nand)
{
return nanddev_target_size(nand) * nanddev_ntargets(nand);
}
/**
* nanddev_get_memorg() - Extract memory organization info from a NAND device
* @nand: NAND device
*
* This can be used by the upper layer to fill the memorg info before calling
* nanddev_init().
*
* Return: the memorg object embedded in the NAND device.
*/
static inline struct nand_memory_organization *
nanddev_get_memorg(struct nand_device *nand)
{
return &nand->memorg;
}
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
struct module *owner);
void nanddev_cleanup(struct nand_device *nand);
/**
* nanddev_register() - Register a NAND device
* @nand: NAND device
*
* Register a NAND device.
* This function is just a wrapper around mtd_device_register()
* registering the MTD device embedded in @nand.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
static inline int nanddev_register(struct nand_device *nand)
{
return mtd_device_register(nand->mtd, NULL, 0);
}
/**
* nanddev_unregister() - Unregister a NAND device
* @nand: NAND device
*
* Unregister a NAND device.
* This function is just a wrapper around mtd_device_unregister()
* unregistering the MTD device embedded in @nand.
*
* Return: 0 in case of success, a negative error code otherwise.
*/
static inline int nanddev_unregister(struct nand_device *nand)
{
return mtd_device_unregister(nand->mtd);
}
/**
* nanddev_set_of_node() - Attach a DT node to a NAND device
* @nand: NAND device
* @np: DT node
*
* Attach a DT node to a NAND device.
*/
static inline void nanddev_set_of_node(struct nand_device *nand,
const struct device_node *np)
{
mtd_set_of_node(nand->mtd, np);
}
/**
* nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
* @nand: NAND device
*
* Return: the DT node attached to @nand.
*/
static inline const struct device_node *nanddev_get_of_node(struct nand_device *nand)
{
return mtd_get_of_node(nand->mtd);
}
/**
* nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
* @nand: NAND device
* @offs: absolute NAND offset (usually passed by the MTD layer)
* @pos: a NAND position object to fill in
*
* Converts @offs into a nand_pos representation.
*
* Return: the offset within the NAND page pointed by @pos.
*/
static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
loff_t offs,
struct nand_pos *pos)
{
unsigned int pageoffs;
u64 tmp = offs;
pageoffs = do_div(tmp, nand->memorg.pagesize);
pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
pos->lun = do_div(tmp, nand->memorg.luns_per_target);
pos->target = tmp;
return pageoffs;
}
/**
* nanddev_pos_cmp() - Compare two NAND positions
* @a: First NAND position
* @b: Second NAND position
*
* Compares two NAND positions.
*
* Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
*/
static inline int nanddev_pos_cmp(const struct nand_pos *a,
const struct nand_pos *b)
{
if (a->target != b->target)
return a->target < b->target ? -1 : 1;
if (a->lun != b->lun)
return a->lun < b->lun ? -1 : 1;
if (a->eraseblock != b->eraseblock)
return a->eraseblock < b->eraseblock ? -1 : 1;
if (a->page != b->page)
return a->page < b->page ? -1 : 1;
return 0;
}
/**
* nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
* @nand: NAND device
* @pos: the NAND position to convert
*
* Converts @pos NAND position into an absolute offset.
*
* Return: the absolute offset. Note that @pos points to the beginning of a
* page; if one wants to point to a specific offset within this page,
* the returned offset has to be adjusted manually.
*/
static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
const struct nand_pos *pos)
{
unsigned int npages;
npages = pos->page +
((pos->eraseblock +
(pos->lun +
(pos->target * nand->memorg.luns_per_target)) *
nand->memorg.eraseblocks_per_lun) *
nand->memorg.pages_per_eraseblock);
return (loff_t)npages * nand->memorg.pagesize;
}
/**
* nanddev_pos_to_row() - Extract a row address from a NAND position
* @nand: NAND device
* @pos: the position to convert
*
* Converts a NAND position into a row address that can then be passed to the
* device.
*
* Return: the row address extracted from @pos.
*/
static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
const struct nand_pos *pos)
{
return (pos->lun << nand->rowconv.lun_addr_shift) |
(pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
pos->page;
}
/**
* nanddev_pos_next_target() - Move a position to the next target/die
* @nand: NAND device
* @pos: the position to update
*
* Updates @pos to point to the start of the next target/die. Useful when you
* want to iterate over all targets/dies of a NAND device.
*/
static inline void nanddev_pos_next_target(struct nand_device *nand,
struct nand_pos *pos)
{
pos->page = 0;
pos->plane = 0;
pos->eraseblock = 0;
pos->lun = 0;
pos->target++;
}
/**
* nanddev_pos_next_lun() - Move a position to the next LUN
* @nand: NAND device
* @pos: the position to update
*
* Updates @pos to point to the start of the next LUN. Useful when you want to
* iterate over all LUNs of a NAND device.
*/
static inline void nanddev_pos_next_lun(struct nand_device *nand,
struct nand_pos *pos)
{
if (pos->lun >= nand->memorg.luns_per_target - 1)
return nanddev_pos_next_target(nand, pos);
pos->lun++;
pos->page = 0;
pos->plane = 0;
pos->eraseblock = 0;
}
/**
* nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
* @nand: NAND device
* @pos: the position to update
*
* Updates @pos to point to the start of the next eraseblock. Useful when you
* want to iterate over all eraseblocks of a NAND device.
*/
static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
struct nand_pos *pos)
{
if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
return nanddev_pos_next_lun(nand, pos);
pos->eraseblock++;
pos->page = 0;
pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
}
/**
* nanddev_pos_next_page() - Move a position to the next page
* @nand: NAND device
* @pos: the position to update
*
* Updates @pos to point to the start of the next page. Useful when you want to
* iterate over all pages of a NAND device.
*/
static inline void nanddev_pos_next_page(struct nand_device *nand,
struct nand_pos *pos)
{
if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
return nanddev_pos_next_eraseblock(nand, pos);
pos->page++;
}
/**
* nanddev_io_iter_init() - Initialize a NAND I/O iterator
* @nand: NAND device
* @offs: absolute offset
* @req: MTD request
* @iter: NAND I/O iterator
*
* Initializes a NAND iterator based on the information passed by the MTD
* layer.
*/
static inline void nanddev_io_iter_init(struct nand_device *nand,
loff_t offs, struct mtd_oob_ops *req,
struct nand_io_iter *iter)
{
struct mtd_info *mtd = nanddev_to_mtd(nand);
iter->req.mode = req->mode;
iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
iter->req.ooboffs = req->ooboffs;
iter->oobbytes_per_page = mtd_oobavail(mtd, req);
iter->dataleft = req->len;
iter->oobleft = req->ooblen;
iter->req.databuf.in = req->datbuf;
iter->req.datalen = min_t(unsigned int,
nand->memorg.pagesize - iter->req.dataoffs,
iter->dataleft);
iter->req.oobbuf.in = req->oobbuf;
iter->req.ooblen = min_t(unsigned int,
iter->oobbytes_per_page - iter->req.ooboffs,
iter->oobleft);
}
/**
* nanddev_io_iter_next_page() - Move to the next page
* @nand: NAND device
* @iter: NAND I/O iterator
*
* Updates the @iter to point to the next page.
*/
static inline void nanddev_io_iter_next_page(struct nand_device *nand,
struct nand_io_iter *iter)
{
nanddev_pos_next_page(nand, &iter->req.pos);
iter->dataleft -= iter->req.datalen;
iter->req.databuf.in += iter->req.datalen;
iter->oobleft -= iter->req.ooblen;
iter->req.oobbuf.in += iter->req.ooblen;
iter->req.dataoffs = 0;
iter->req.ooboffs = 0;
iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
iter->dataleft);
iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
iter->oobleft);
}
/**
* nanddev_io_iter_end() - Should end iteration or not
* @nand: NAND device
* @iter: NAND I/O iterator
*
* Check whether @iter has reached the end of the NAND portion it was asked to
* iterate on or not.
*
* Return: true if @iter has reached the end of the iteration request, false
* otherwise.
*/
static inline bool nanddev_io_iter_end(struct nand_device *nand,
const struct nand_io_iter *iter)
{
if (iter->dataleft || iter->oobleft)
return false;
return true;
}
/**
* nanddev_io_for_each_page() - Iterate over all NAND pages contained in an MTD I/O
* request
* @nand: NAND device
* @start: start address to read/write from
* @req: MTD I/O request
* @iter: NAND I/O iterator
*
* Should be used to iterate over pages that are contained in an MTD request.
*/
#define nanddev_io_for_each_page(nand, start, req, iter) \
for (nanddev_io_iter_init(nand, start, req, iter); \
!nanddev_io_iter_end(nand, iter); \
nanddev_io_iter_next_page(nand, iter))
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
/* BBT related functions */
enum nand_bbt_block_status {
NAND_BBT_BLOCK_STATUS_UNKNOWN,
NAND_BBT_BLOCK_GOOD,
NAND_BBT_BLOCK_WORN,
NAND_BBT_BLOCK_RESERVED,
NAND_BBT_BLOCK_FACTORY_BAD,
NAND_BBT_BLOCK_NUM_STATUS,
};
int nanddev_bbt_init(struct nand_device *nand);
void nanddev_bbt_cleanup(struct nand_device *nand);
int nanddev_bbt_update(struct nand_device *nand);
int nanddev_bbt_get_block_status(const struct nand_device *nand,
unsigned int entry);
int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
enum nand_bbt_block_status status);
int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
/**
* nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
* @nand: NAND device
* @pos: the NAND position we want to get BBT entry for
*
* Return the BBT entry used to store information about the eraseblock pointed
* by @pos.
*
* Return: the BBT entry storing information about eraseblock pointed by @pos.
*/
static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
const struct nand_pos *pos)
{
return pos->eraseblock +
((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
nand->memorg.eraseblocks_per_lun);
}
/**
* nanddev_bbt_is_initialized() - Check if the BBT has been initialized
* @nand: NAND device
*
* Return: true if the BBT has been initialized, false otherwise.
*/
static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
{
return !!nand->bbt.cache;
}
/* MTD -> NAND helper functions. */
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
#endif /* __LINUX_MTD_NAND_H */
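
Sketch of the I/O iterator in action (hypothetical caller, not part of this diff): count how many NAND pages an MTD request touches. A real sub-layer would issue one page read/write per iteration instead of just counting.

#include <linux/mtd/nand.h>

static unsigned int nanddev_req_pages_sketch(struct nand_device *nand,
					     loff_t start,
					     struct mtd_oob_ops *req)
{
	struct nand_io_iter iter;
	unsigned int npages = 0;

	/* iter.req describes one page-sized chunk per loop iteration */
	nanddev_io_for_each_page(nand, start, req, &iter)
		npages++;

	return npages;
}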

View file

@ -81,10 +81,30 @@ extern void register_mtd_parser(struct mtd_part_parser *parser);
extern void deregister_mtd_parser(struct mtd_part_parser *parser);
#endif
int mtd_is_partition(const struct mtd_info *mtd);
int mtd_add_partition(struct mtd_info *master, const char *name,
long long offset, long long length);
int mtd_del_partition(struct mtd_info *master, int partno);
uint64_t mtd_get_device_size(const struct mtd_info *mtd);
#if defined(CONFIG_MTD_PARTITIONS)
int mtd_parse_partitions(struct mtd_info *parent, const char **_mtdparts,
struct mtd_partition **_parts, int *_nparts);
void mtd_free_parsed_partitions(struct mtd_partition *parts,
unsigned int nparts);
#else
static inline int
mtd_parse_partitions(struct mtd_info *parent, const char **_mtdparts,
struct mtd_partition **_parts, int *_nparts)
{
*_nparts = 0;
return 0;
}
static inline void
mtd_free_parsed_partitions(struct mtd_partition *parts, unsigned int nparts)
{
return;
}
#endif /* defined(CONFIG_MTD_PARTITIONS) */
#endif

432
include/linux/mtd/spinand.h Normal file
View file

@ -0,0 +1,432 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016-2017 Micron Technology, Inc.
*
* Authors:
* Peter Pan <peterpandong@micron.com>
*/
#ifndef __LINUX_MTD_SPINAND_H
#define __LINUX_MTD_SPINAND_H
#ifndef __UBOOT__
#include <linux/mutex.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#else
#include <common.h>
#include <spi.h>
#include <spi-mem.h>
#include <linux/mtd/nand.h>
#endif
/**
* Standard SPI NAND flash operations
*/
#define SPINAND_RESET_OP \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
#define SPINAND_WR_EN_DIS_OP(enable) \
SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
#define SPINAND_READID_OP(ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1))
#define SPINAND_SET_FEATURE_OP(reg, valptr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \
SPI_MEM_OP_ADDR(1, reg, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(1, valptr, 1))
#define SPINAND_GET_FEATURE_OP(reg, valptr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \
SPI_MEM_OP_ADDR(1, reg, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_IN(1, valptr, 1))
#define SPINAND_BLK_ERASE_OP(addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
#define SPINAND_PAGE_READ_OP(addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
#define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1))
#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 2))
#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 4))
#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
SPI_MEM_OP_ADDR(2, addr, 2), \
SPI_MEM_OP_DUMMY(ndummy, 2), \
SPI_MEM_OP_DATA_IN(len, buf, 2))
#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
SPI_MEM_OP_ADDR(2, addr, 4), \
SPI_MEM_OP_DUMMY(ndummy, 4), \
SPI_MEM_OP_DATA_IN(len, buf, 4))
#define SPINAND_PROG_EXEC_OP(addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
#define SPINAND_PROG_LOAD(reset, addr, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(len, buf, 1))
#define SPINAND_PROG_LOAD_X4(reset, addr, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(len, buf, 4))
/**
* Standard SPI NAND flash commands
*/
#define SPINAND_CMD_PROG_LOAD_X4 0x32
#define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4 0x34
/* feature register */
#define REG_BLOCK_LOCK 0xa0
#define BL_ALL_UNLOCKED 0x00
/* configuration register */
#define REG_CFG 0xb0
#define CFG_OTP_ENABLE BIT(6)
#define CFG_ECC_ENABLE BIT(4)
#define CFG_QUAD_ENABLE BIT(0)
/* status register */
#define REG_STATUS 0xc0
#define STATUS_BUSY BIT(0)
#define STATUS_ERASE_FAILED BIT(2)
#define STATUS_PROG_FAILED BIT(3)
#define STATUS_ECC_MASK GENMASK(5, 4)
#define STATUS_ECC_NO_BITFLIPS (0 << 4)
#define STATUS_ECC_HAS_BITFLIPS (1 << 4)
#define STATUS_ECC_UNCOR_ERROR (2 << 4)
struct spinand_op;
struct spinand_device;
#define SPINAND_MAX_ID_LEN 4
/**
* struct spinand_id - SPI NAND id structure
* @data: buffer containing the id bytes. Currently 4 bytes large, but can
* be extended if required
* @len: ID length
*
* struct_spinand_id->data contains all bytes returned after a READ_ID command,
* including dummy bytes if the chip does not emit ID bytes right after the
* READ_ID command. The responsibility to extract real ID bytes is left to
* struct_manufacturer_ops->detect().
*/
struct spinand_id {
u8 data[SPINAND_MAX_ID_LEN];
int len;
};
/**
* struct spinand_manufacturer_ops - SPI NAND manufacturer specific operations
* @detect: detect a SPI NAND device. Every time a SPI NAND device is probed
* the core calls the struct_manufacturer_ops->detect() hook of each
* registered manufacturer until one of them returns 1. Note that
* the first thing to check in this hook is that the manufacturer ID
* in struct_spinand_device->id matches the manufacturer whose
* ->detect() hook has been called. Should return 1 if there's a
* match, 0 if the manufacturer ID does not match and a negative
* error code otherwise. When true is returned, the core assumes
* that properties of the NAND chip (spinand->base.memorg and
* spinand->base.eccreq) have been filled
* @init: initialize a SPI NAND device
* @cleanup: cleanup a SPI NAND device
*
* Each SPI NAND manufacturer driver should implement this interface so that
* NAND chips coming from this vendor can be detected and initialized properly.
*/
struct spinand_manufacturer_ops {
int (*detect)(struct spinand_device *spinand);
int (*init)(struct spinand_device *spinand);
void (*cleanup)(struct spinand_device *spinand);
};
/**
* struct spinand_manufacturer - SPI NAND manufacturer instance
* @id: manufacturer ID
* @name: manufacturer name
* @ops: manufacturer operations
*/
struct spinand_manufacturer {
u8 id;
char *name;
const struct spinand_manufacturer_ops *ops;
};
/* SPI NAND manufacturers */
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;
extern const struct spinand_manufacturer winbond_spinand_manufacturer;
/**
* struct spinand_op_variants - SPI NAND operation variants
* @ops: the list of variants for a given operation
* @nops: the number of variants
*
* Some operations like read-from-cache/write-to-cache have several variants
* depending on the number of IO lines you use to transfer data or address
* cycles. This structure is a way to describe the different variants supported
* by a chip and let the core pick the best one based on the SPI mem controller
* capabilities.
*/
struct spinand_op_variants {
const struct spi_mem_op *ops;
unsigned int nops;
};
#define SPINAND_OP_VARIANTS(name, ...) \
const struct spinand_op_variants name = { \
.ops = (struct spi_mem_op[]) { __VA_ARGS__ }, \
.nops = sizeof((struct spi_mem_op[]){ __VA_ARGS__ }) / \
sizeof(struct spi_mem_op), \
}
/**
* spinand_ecc_info - description of the on-die ECC implemented by a SPI NAND
* chip
* @get_status: get the ECC status. Should return a positive number encoding
* the number of corrected bitflips if correction was possible or
* -EBADMSG if there are uncorrectable errors. It can also return
* other negative error codes if the error is not caused by
* uncorrectable bitflips
* @ooblayout: the OOB layout used by the on-die ECC implementation
*/
struct spinand_ecc_info {
int (*get_status)(struct spinand_device *spinand, u8 status);
const struct mtd_ooblayout_ops *ooblayout;
};
#define SPINAND_HAS_QE_BIT BIT(0)
/**
* struct spinand_info - Structure used to describe SPI NAND chips
* @model: model name
* @devid: device ID
* @flags: OR-ing of the SPINAND_XXX flags
* @memorg: memory organization
* @eccreq: ECC requirements
* @eccinfo: on-die ECC info
* @op_variants: operations variants
* @op_variants.read_cache: variants of the read-cache operation
* @op_variants.write_cache: variants of the write-cache operation
* @op_variants.update_cache: variants of the update-cache operation
* @select_target: function used to select a target/die. Required only for
* multi-die chips
*
* Each SPI NAND manufacturer driver should have a spinand_info table
* describing all the chips supported by the driver.
*/
struct spinand_info {
const char *model;
u8 devid;
u32 flags;
struct nand_memory_organization memorg;
struct nand_ecc_req eccreq;
struct spinand_ecc_info eccinfo;
struct {
const struct spinand_op_variants *read_cache;
const struct spinand_op_variants *write_cache;
const struct spinand_op_variants *update_cache;
} op_variants;
int (*select_target)(struct spinand_device *spinand,
unsigned int target);
};
#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \
{ \
.read_cache = __read, \
.write_cache = __write, \
.update_cache = __update, \
}
#define SPINAND_ECCINFO(__ooblayout, __get_status) \
.eccinfo = { \
.ooblayout = __ooblayout, \
.get_status = __get_status, \
}
#define SPINAND_SELECT_TARGET(__func) \
.select_target = __func,
#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
__flags, ...) \
{ \
.model = __model, \
.devid = __id, \
.memorg = __memorg, \
.eccreq = __eccreq, \
.op_variants = __op_variants, \
.flags = __flags, \
__VA_ARGS__ \
}
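/*
 * Putting the helpers above together: a device-table sketch loosely based
 * on the Micron driver. NAND_MEMORG() and NAND_ECCREQ() are assumed to come
 * from <linux/mtd/nand.h>; the geometry, device ID and the OOB-layout and
 * status-helper names are illustrative:
 *
 *	static const struct spinand_info micron_spinand_table[] = {
 *		SPINAND_INFO("MT29F2G01ABAGD", 0x24,
 *			     NAND_MEMORG(1, 2048, 128, 64, 2048, 2, 1, 1),
 *			     NAND_ECCREQ(8, 512),
 *			     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
 *						      &write_cache_variants,
 *						      &update_cache_variants),
 *			     0,
 *			     SPINAND_ECCINFO(&micron_8_ooblayout,
 *					     micron_8_ecc_get_status)),
 *	};
 */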
/**
* struct spinand_device - SPI NAND device instance
* @base: NAND device instance
* @slave: pointer to the SPI slave object
* @lock: lock used to serialize accesses to the NAND
* @id: NAND ID as returned by READ_ID
* @flags: NAND flags
* @op_templates: various SPI mem op templates
* @op_templates.read_cache: read cache op template
* @op_templates.write_cache: write cache op template
* @op_templates.update_cache: update cache op template
* @select_target: select a specific target/die. Usually called before sending
* a command addressing a page or an eraseblock embedded in
* this die. Only required if your chip exposes several dies
* @cur_target: currently selected target/die
* @eccinfo: on-die ECC information
* @cfg_cache: config register cache. One entry per die
* @databuf: bounce buffer for data
* @oobbuf: bounce buffer for OOB data
* @scratchbuf: buffer used for everything but page accesses. This is needed
* because the spi-mem interface explicitly requests that buffers
* passed in spi_mem_op be DMA-able, so we can't place these buffers
* on the stack
* @manufacturer: SPI NAND manufacturer information
* @priv: manufacturer private data
*/
struct spinand_device {
struct nand_device base;
#ifndef __UBOOT__
struct spi_mem *spimem;
struct mutex lock;
#else
struct spi_slave *slave;
#endif
struct spinand_id id;
u32 flags;
struct {
const struct spi_mem_op *read_cache;
const struct spi_mem_op *write_cache;
const struct spi_mem_op *update_cache;
} op_templates;
int (*select_target)(struct spinand_device *spinand,
unsigned int target);
unsigned int cur_target;
struct spinand_ecc_info eccinfo;
u8 *cfg_cache;
u8 *databuf;
u8 *oobbuf;
u8 *scratchbuf;
const struct spinand_manufacturer *manufacturer;
void *priv;
};
/**
* mtd_to_spinand() - Get the SPI NAND device attached to an MTD instance
* @mtd: MTD instance
*
* Return: the SPI NAND device attached to @mtd.
*/
static inline struct spinand_device *mtd_to_spinand(struct mtd_info *mtd)
{
return container_of(mtd_to_nanddev(mtd), struct spinand_device, base);
}
/**
* spinand_to_mtd() - Get the MTD device embedded in a SPI NAND device
* @spinand: SPI NAND device
*
* Return: the MTD device embedded in @spinand.
*/
static inline struct mtd_info *spinand_to_mtd(struct spinand_device *spinand)
{
return nanddev_to_mtd(&spinand->base);
}
/**
* nand_to_spinand() - Get the SPI NAND device embedding a NAND object
* @nand: NAND object
*
* Return: the SPI NAND device embedding @nand.
*/
static inline struct spinand_device *nand_to_spinand(struct nand_device *nand)
{
return container_of(nand, struct spinand_device, base);
}
/**
* spinand_to_nand() - Get the NAND device embedded in a SPI NAND object
* @spinand: SPI NAND device
*
* Return: the NAND device embedded in @spinand.
*/
static inline struct nand_device *
spinand_to_nand(struct spinand_device *spinand)
{
return &spinand->base;
}
/**
* spinand_set_of_node() - Attach a DT node to a SPI NAND device
* @spinand: SPI NAND device
* @np: DT node
*
* Attach a DT node to a SPI NAND device.
*/
static inline void spinand_set_of_node(struct spinand_device *spinand,
const struct device_node *np)
{
nanddev_set_of_node(&spinand->base, np);
}
int spinand_match_and_init(struct spinand_device *dev,
const struct spinand_info *table,
unsigned int table_size, u8 devid);
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);
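/*
 * A sketch of how a manufacturer detect() hook typically calls
 * spinand_match_and_init(): byte 1 of the raw READ_ID data is compared
 * against the manufacturer ID and byte 2 is passed as the device ID. The
 * 0x2c value and the table name are illustrative:
 *
 *	static int micron_spinand_detect(struct spinand_device *spinand)
 *	{
 *		u8 *id = spinand->id.data;
 *		int ret;
 *
 *		if (id[1] != 0x2c)
 *			return 0;
 *
 *		ret = spinand_match_and_init(spinand, micron_spinand_table,
 *					     ARRAY_SIZE(micron_spinand_table),
 *					     id[2]);
 *		if (ret)
 *			return ret;
 *
 *		return 1;
 *	}
 */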
#endif /* __LINUX_MTD_SPINAND_H */
