Merge branch '2023-08-24-further-ufs-updates' into next

- Clean up and improve the UFS subsystem slightly, in preparation for
  supporting more platforms.
Tom Rini 2023-08-24 15:39:55 -04:00
commit 6de1a3eb80
2 changed files with 97 additions and 11 deletions

drivers/ufs/ufs.c

@@ -8,6 +8,7 @@
  * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
  */
 
+#include <bouncebuf.h>
 #include <charset.h>
 #include <common.h>
 #include <dm.h>
@@ -691,14 +692,30 @@ static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
 	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
 }
 
+/**
+ * ufshcd_cache_flush_and_invalidate - Flush and invalidate cache
+ *
+ * Flush and invalidate cache in aligned address..address+size range.
+ * The invalidation is in place to avoid stale data in cache.
+ */
+static void ufshcd_cache_flush_and_invalidate(void *addr, unsigned long size)
+{
+	uintptr_t aaddr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1);
+	unsigned long asize = ALIGN(size, ARCH_DMA_MINALIGN);
+
+	flush_dcache_range(aaddr, aaddr + asize);
+	invalidate_dcache_range(aaddr, aaddr + asize);
+}
+
 /**
  * ufshcd_prepare_req_desc_hdr() - Fills the requests header
  * descriptor according to request
  */
-static void ufshcd_prepare_req_desc_hdr(struct utp_transfer_req_desc *req_desc,
+static void ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
 					u32 *upiu_flags,
 					enum dma_data_direction cmd_dir)
 {
+	struct utp_transfer_req_desc *req_desc = hba->utrdl;
 	u32 data_direction;
 	u32 dword_0;
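The new helper widens every cache operation to whole ARCH_DMA_MINALIGN lines: the start address is rounded down and the size rounded up. A standalone worked example of that arithmetic (illustrative only; the 64-byte line size and the addresses are assumptions, not values from this patch):

#include <stdint.h>
#include <stdio.h>

#define ARCH_DMA_MINALIGN	64	/* assumed cache line size */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uintptr_t addr = 0x80001008;	/* hypothetical descriptor address */
	unsigned long size = 32;	/* hypothetical descriptor size */

	uintptr_t aaddr = addr & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long asize = ALIGN(size, ARCH_DMA_MINALIGN);

	/* Prints aaddr=0x80001000 asize=64: one whole cache line covers
	 * the descriptor at 0x80001008..0x80001028. */
	printf("aaddr=%#lx asize=%lu\n", (unsigned long)aaddr, asize);
	return 0;
}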
@@ -733,6 +750,8 @@ static void ufshcd_prepare_req_desc_hdr(struct utp_transfer_req_desc *req_desc,
 	req_desc->header.dword_3 = 0;
 
 	req_desc->prd_table_length = 0;
+
+	ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
 }
 
 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
@@ -761,10 +780,15 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
 	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE);
 
 	/* Copy the Descriptor */
-	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) {
 		memcpy(ucd_req_ptr + 1, query->descriptor, len);
+		ufshcd_cache_flush_and_invalidate(ucd_req_ptr, 2 * sizeof(*ucd_req_ptr));
+	} else {
+		ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
+	}
 
 	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+	ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
 }
 
 static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
@@ -781,6 +805,9 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
 	ucd_req_ptr->header.dword_2 = 0;
 
 	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+
+	ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
+	ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
 }
 
 /**
@@ -792,11 +819,10 @@ static int ufshcd_comp_devman_upiu(struct ufs_hba *hba,
 {
 	u32 upiu_flags;
 	int ret = 0;
-	struct utp_transfer_req_desc *req_desc = hba->utrdl;
 
 	hba->dev_cmd.type = cmd_type;
 
-	ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, DMA_NONE);
+	ufshcd_prepare_req_desc_hdr(hba, &upiu_flags, DMA_NONE);
 
 	switch (cmd_type) {
 	case DEV_CMD_TYPE_QUERY:
 		ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags);
@@ -857,7 +883,9 @@ static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
  */
 static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba)
 {
-	return le32_to_cpu(hba->utrdl->header.dword_2) & MASK_OCS;
+	struct utp_transfer_req_desc *req_desc = hba->utrdl;
+
+	return le32_to_cpu(req_desc->header.dword_2) & MASK_OCS;
 }
 
 static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
@@ -1406,6 +1434,8 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
 	memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);
 
 	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+	ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
+	ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
 }
 
 static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
@@ -1420,6 +1450,7 @@ static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
 {
 	struct utp_transfer_req_desc *req_desc = hba->utrdl;
 	struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
+	uintptr_t aaddr = (uintptr_t)(pccb->pdata) & ~(ARCH_DMA_MINALIGN - 1);
 	ulong datalen = pccb->datalen;
 	int table_length;
 	u8 *buf;
@@ -1427,9 +1458,19 @@ static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
 	if (!datalen) {
 		req_desc->prd_table_length = 0;
+		ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
 		return;
 	}
 
+	if (pccb->dma_dir == DMA_TO_DEVICE) {	/* Write to device */
+		flush_dcache_range(aaddr, aaddr +
+				   ALIGN(datalen, ARCH_DMA_MINALIGN));
+	}
+
+	/* In any case, invalidate cache to avoid stale data in it. */
+	invalidate_dcache_range(aaddr, aaddr +
+				ALIGN(datalen, ARCH_DMA_MINALIGN));
+
 	table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
 	buf = pccb->pdata;
 	i = table_length;
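The direction handling above follows the standard DMA cache discipline: flush so the device sees what the CPU wrote (DMA_TO_DEVICE), and invalidate in every case so later CPU reads fetch what the device wrote rather than stale cache lines. A minimal sketch of that rule as a generic U-Boot helper (hypothetical; not part of this patch):

#include <cpu_func.h>			/* flush/invalidate_dcache_range() */
#include <asm/cache.h>			/* ARCH_DMA_MINALIGN */
#include <linux/dma-direction.h>	/* enum dma_data_direction */
#include <linux/kernel.h>		/* ALIGN() */

/* Hypothetical helper restating the rule used in prepare_prdt_table(). */
static void dma_maintain_buffer(void *buf, unsigned long len,
				enum dma_data_direction dir)
{
	uintptr_t start = (uintptr_t)buf & ~(ARCH_DMA_MINALIGN - 1);
	uintptr_t end = start + ALIGN(len, ARCH_DMA_MINALIGN);

	if (dir == DMA_TO_DEVICE)
		flush_dcache_range(start, end);	/* device must see CPU writes */

	/* In any case, invalidate so later CPU reads are not stale. */
	invalidate_dcache_range(start, end);
}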
@@ -1443,17 +1484,18 @@ static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
 	prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1);
 
 	req_desc->prd_table_length = table_length;
+	ufshcd_cache_flush_and_invalidate(prd_table, sizeof(*prd_table) * table_length);
+	ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
 }
 
 static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)
 {
 	struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent);
-	struct utp_transfer_req_desc *req_desc = hba->utrdl;
 	u32 upiu_flags;
 	int ocs, result = 0;
 	u8 scsi_status;
 
-	ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, pccb->dma_dir);
+	ufshcd_prepare_req_desc_hdr(hba, &upiu_flags, pccb->dma_dir);
 	ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags);
 	prepare_prdt_table(hba, pccb);
@@ -1630,8 +1672,13 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
 	if (hba->max_pwr_info.is_valid)
 		return 0;
 
-	pwr_info->pwr_tx = FAST_MODE;
-	pwr_info->pwr_rx = FAST_MODE;
+	if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
+		pwr_info->pwr_tx = FASTAUTO_MODE;
+		pwr_info->pwr_rx = FASTAUTO_MODE;
+	} else {
+		pwr_info->pwr_tx = FAST_MODE;
+		pwr_info->pwr_rx = FAST_MODE;
+	}
 	pwr_info->hs_rate = PA_HS_MODE_B;
 
 	/* Get the connected lane count */
@@ -1889,13 +1936,16 @@ int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops)
 	/* Read capabilties registers */
 	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
+	if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
+		hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
 
 	/* Get UFS version supported by the controller */
 	hba->version = ufshcd_get_ufs_version(hba);
 	if (hba->version != UFSHCI_VERSION_10 &&
 	    hba->version != UFSHCI_VERSION_11 &&
 	    hba->version != UFSHCI_VERSION_20 &&
-	    hba->version != UFSHCI_VERSION_21)
+	    hba->version != UFSHCI_VERSION_21 &&
+	    hba->version != UFSHCI_VERSION_30)
 		dev_err(hba->dev, "invalid UFS version 0x%x\n",
 			hba->version);
@@ -1942,8 +1992,31 @@ int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp)
 	return ret;
 }
 
+#if IS_ENABLED(CONFIG_BOUNCE_BUFFER)
+static int ufs_scsi_buffer_aligned(struct udevice *scsi_dev, struct bounce_buffer *state)
+{
+#ifdef CONFIG_PHYS_64BIT
+	struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent);
+	uintptr_t ubuf = (uintptr_t)state->user_buffer;
+	size_t len = state->len_aligned;
+
+	/* Check if below 32bit boundary */
+	if ((hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) &&
+	    ((ubuf >> 32) || (ubuf + len) >> 32)) {
+		dev_dbg(scsi_dev, "Buffer above 32bit boundary %lx-%lx\n",
+			ubuf, ubuf + len);
+		return 0;
+	}
+#endif
+	return 1;
+}
+#endif	/* CONFIG_BOUNCE_BUFFER */
+
 static struct scsi_ops ufs_ops = {
 	.exec = ufs_scsi_exec,
+#if IS_ENABLED(CONFIG_BOUNCE_BUFFER)
+	.buffer_aligned = ufs_scsi_buffer_aligned,
+#endif	/* CONFIG_BOUNCE_BUFFER */
 };
 
 int ufs_probe_dev(int index)
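The new buffer_aligned hook lets the SCSI layer ask the driver whether a caller's buffer is usable for DMA, and fall back to a bounce buffer when it is not. A sketch of how a transfer could be bracketed with U-Boot's bounce buffer API (illustrative only; scsi_exec_bounced() is a hypothetical name, and the real wiring lives in the SCSI uclass):

#include <bouncebuf.h>
#include <scsi.h>
#include <linux/dma-direction.h>

/* Hypothetical wrapper: run one SCSI command through a bounce buffer. */
static int scsi_exec_bounced(struct udevice *sdev, struct scsi_cmd *pccb)
{
	struct bounce_buffer bbstate;
	/* GEN_BB_READ: device reads the buffer; GEN_BB_WRITE: device fills it. */
	unsigned int flags = pccb->dma_dir == DMA_TO_DEVICE ?
			     GEN_BB_READ : GEN_BB_WRITE;
	void *orig = pccb->pdata;
	int ret;

	ret = bounce_buffer_start(&bbstate, pccb->pdata, pccb->datalen, flags);
	if (ret)
		return ret;

	pccb->pdata = bbstate.bounce_buffer;	/* DMA through the safe copy */
	ret = ufs_scsi_exec(sdev, pccb);
	pccb->pdata = orig;
	bounce_buffer_stop(&bbstate);		/* copies back for GEN_BB_WRITE */
	return ret;
}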

drivers/ufs/ufs.h

@@ -717,7 +717,19 @@ struct ufs_hba {
 	 * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
 	 * attribute of device to 0).
 	 */
-#define UFSHCD_QUIRK_BROKEN_LCC		0x1
+#define UFSHCD_QUIRK_BROKEN_LCC		BIT(0)
+
+	/*
+	 * This quirk needs to be enabled if the host controller has
+	 * 64-bit addressing supported capability but it doesn't work.
+	 */
+#define UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS	BIT(1)
+
+	/*
+	 * This quirk needs to be enabled if the host controller has
+	 * auto-hibernate capability but it's FASTAUTO only.
+	 */
+#define UFSHCD_QUIRK_HIBERN_FASTAUTO		BIT(2)
 
 	/* Virtual memory reference */
 	struct utp_transfer_cmd_desc *ucdl;
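These quirk bits are meant to be ORed into hba->quirks by platform glue early enough that ufshcd_probe() sees them when it reads the capability registers (see the ufshcd_probe() hunk above); the driver's .init hook is the natural place. A hypothetical platform driver sketch (all "myplat" names are invented for illustration):

#include <dm.h>
#include "ufs.h"	/* ufshcd_probe(), struct ufs_hba_ops, quirk flags */

/* Hypothetical platform glue setting both new quirks. */
static int myplat_ufs_init(struct ufs_hba *hba)
{
	/* Controller advertises 64-bit addressing, but it does not work. */
	hba->quirks |= UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS;
	/* Its auto-hibernate support is FASTAUTO-only. */
	hba->quirks |= UFSHCD_QUIRK_HIBERN_FASTAUTO;

	return 0;
}

static struct ufs_hba_ops myplat_ufs_hba_ops = {
	.init = myplat_ufs_init,
};

static int myplat_ufs_probe(struct udevice *dev)
{
	return ufshcd_probe(dev, &myplat_ufs_hba_ops);
}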
@@ -769,6 +781,7 @@ enum {
 	UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */
 	UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */
 	UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */
+	UFSHCI_VERSION_30 = 0x00000300, /* 3.0 */
 };
 
 /* Interrupt disable masks */