Mirror of https://github.com/AsahiLinux/u-boot, synced 2024-11-18 18:59:44 +00:00
783a15b351
The relevant changes to the already existing GD5F1GQ4UExxG support have been
determined by consulting the GigaDevice product change notice AN-0392-10,
version 1.0 from November 30, 2020. As the overlap is large, variable names
have been generalized accordingly.

Apart from the lowered ECC strength (4 instead of 8 bits per 512 bytes), the
new device ID, and the extra quad IO dummy byte, no changes had to be taken
into account.

New hardware features are not supported, namely:
- Power-on reset
- Unique ID
- Double transfer rate (DTR)
- Parameter page
- Random data quad IO

The inverted semantic of the "driver strength" register bits, defaulting to
100% instead of 50% for the Q5 devices, got ignored as the driver has never
touched them anyway.

The no longer supported "read from cache during block erase" functionality is
not reflected, as the current SPI NAND core does not support it anyway.

The implementation has been tested on MediaTek MT7688 based GARDENA smart
Gateways using both GigaDevice GD5F1GQ5UEYIG and GD5F1GQ4UBYIG.

Signed-off-by: Reto Schneider <reto.schneider@husqvarnagroup.com>
Reviewed-by: Stefan Roese <sr@denx.de>
Acked-by: Jagan Teki <jagan@amarulasolutions.com>
210 lines
5.4 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Stefan Roese <sr@denx.de>
 *
 * Derived from drivers/mtd/nand/spi/micron.c
 * Copyright (c) 2016-2017 Micron Technology, Inc.
 */

#ifndef __UBOOT__
#include <malloc.h>
#include <linux/device.h>
#include <linux/kernel.h>
#endif
#include <linux/mtd/spinand.h>

#define SPINAND_MFR_GIGADEVICE			0xC8

#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS	(1 << 4)
#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS	(3 << 4)

#define GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS	(1 << 4)
#define GD5FXGQ5XE_STATUS_ECC_4_BITFLIPS	(3 << 4)

#define GD5FXGQXXEXXG_REG_STATUS2		0xf0

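/*
 * Page read-from-cache op variants: the SPI NAND core walks these lists and
 * uses the first (fastest) variant supported by the attached SPI controller.
 */
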
/* Q4 devices, QUADIO: Dummy bytes valid for 1 and 2 GBit variants */
static SPINAND_OP_VARIANTS(gd5fxgq4_read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));

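/*
 * Compared to the Q4 parts, the Q5 quad IO page read needs one extra dummy
 * byte, hence the second argument of 2 in the QUADIO variant below.
 */
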
/* Q5 devices, QUADIO: Dummy bytes only valid for 1 GBit variants */
static SPINAND_OP_VARIANTS(gd5f1gq5_read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));

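/*
 * Program load ops: the "write" variants reset the cache before loading data
 * (first argument true), while the "update" variants load without resetting,
 * so data already present in the cache is preserved.
 */
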
static SPINAND_OP_VARIANTS(write_cache_variants,
		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
		SPINAND_PROG_LOAD(true, 0, NULL, 0));

static SPINAND_OP_VARIANTS(update_cache_variants,
		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
		SPINAND_PROG_LOAD(false, 0, NULL, 0));

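/*
 * OOB layout: the 128-byte spare area is split in half. The upper 64 bytes
 * hold the on-die ECC data; the lower 64 bytes are free for the user, except
 * for byte 0, which is reserved for the bad block marker.
 */
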
static int gd5fxgqxxexxg_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	region->offset = 64;
	region->length = 64;

	return 0;
}

static int gd5fxgqxxexxg_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 1 byte for the BBM. */
	region->offset = 1;
	region->length = 63;

	return 0;
}

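/*
 * ECC status decoding for the Q4 devices: when the status register only
 * reports "1 to 7 bitflips", the extended STATUS2 register is read to narrow
 * down the actual number of corrected bits.
 */
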
static int gd5fxgq4xexxg_ecc_get_status(struct spinand_device *spinand,
					u8 status)
{
	u8 status2;
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
						      &status2);
	int ret;

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
		/*
		 * Read status2 register to determine a more fine grained
		 * bit error status
		 */
		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		/*
		 * 4 ... 7 bits are flipped (1..4 can't be detected, so
		 * report the maximum of 4 in this case)
		 */
		/* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
		return ((status & STATUS_ECC_MASK) >> 2) |
		       ((status2 & STATUS_ECC_MASK) >> 4);

	case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
		return 8;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

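/*
 * ECC status decoding for the Q5 devices: STATUS2 reports the exact number of
 * corrected bits (1 to 4) whenever the status register flags bitflips.
 */
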
static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
					u8 status)
{
	u8 status2;
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
						      &status2);
	int ret;

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS:
		/*
		 * Read status2 register to determine a more fine grained
		 * bit error status
		 */
		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		/*
		 * 1 ... 4 bits are flipped (and corrected)
		 */
		/* bits sorted this way (1...0): ECCSE1, ECCSE0 */
		return ((status2 & STATUS_ECC_MASK) >> 4) + 1;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

static const struct mtd_ooblayout_ops gd5fxgqxxexxg_ooblayout = {
	.ecc = gd5fxgqxxexxg_ooblayout_ecc,
	.rfree = gd5fxgqxxexxg_ooblayout_free,
};

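/*
 * Both parts are 1 Gbit devices organised as 2048-byte pages with 128 bytes
 * of OOB, 64 pages per block and 1024 blocks per LUN. The Q4 requires 8-bit
 * ECC per 512 bytes, the Q5 only 4-bit ECC per 512 bytes.
 */
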
static const struct spinand_info gigadevice_spinand_table[] = {
	SPINAND_INFO("GD5F1GQ4UExxG", 0xd1,
		     NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&gd5fxgq4_read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     0,
		     SPINAND_ECCINFO(&gd5fxgqxxexxg_ooblayout,
				     gd5fxgq4xexxg_ecc_get_status)),
	SPINAND_INFO("GD5F1GQ5UExxG", 0x51,
		     NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
		     NAND_ECCREQ(4, 512),
		     SPINAND_INFO_OP_VARIANTS(&gd5f1gq5_read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     0,
		     SPINAND_ECCINFO(&gd5fxgqxxexxg_ooblayout,
				     gd5fxgq5xexxg_ecc_get_status)),
};

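/*
 * Manufacturer detect hook: returns 0 when the ID does not belong to
 * GigaDevice, 1 when a supported device has been matched and initialised,
 * or a negative error code.
 */
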
static int gigadevice_spinand_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	/*
	 * For GD NANDs, there is an address byte needed to shift in before IDs
	 * are read out, so the first byte in raw_id is dummy.
	 */
	if (id[1] != SPINAND_MFR_GIGADEVICE)
		return 0;

	ret = spinand_match_and_init(spinand, gigadevice_spinand_table,
				     ARRAY_SIZE(gigadevice_spinand_table),
				     id[2]);
	if (ret)
		return ret;

	return 1;
}

static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
	.detect = gigadevice_spinand_detect,
};

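/*
 * Exported manufacturer descriptor; the SPI NAND core references it from its
 * table of known manufacturers during device identification.
 */
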
const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
	.id = SPINAND_MFR_GIGADEVICE,
	.name = "GigaDevice",
	.ops = &gigadevice_spinand_manuf_ops,
};