- sdhci: code clean-up and fix cache coherency problem
- enable cache snooping on mpc830x
- Fix build error when MMC_WRITE is disabled
Tom Rini 2020-02-20 07:27:31 -05:00
commit 4246fae418
32 changed files with 187 additions and 192 deletions
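Most of the churn below comes from retiring the per-arch <asm/dma-mapping.h> copies of dma_map_single()/dma_unmap_single() in favour of a common <linux/dma-mapping.h>, where dma_map_single() returns a dma_addr_t and dma_unmap_single() takes that handle rather than a virtual address. A minimal sketch of the driver-side pattern after the change (the function name, register and buffer below are illustrative only, not code from this commit):

#include <asm/io.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>

/* Hypothetical driver fragment: hand one buffer to a DMA-capable device. */
static void example_dma_write(void __iomem *regs, void *buf, size_t len)
{
	/* Flushes the buffer out of the data cache and returns the bus address */
	dma_addr_t dma = dma_map_single(buf, len, DMA_TO_DEVICE);

	writel(lower_32_bits(dma), regs);	/* illustrative DMA address register */
	/* ... start the transfer and wait for it to complete ... */

	/* For DMA_TO_DEVICE this leaves the cache alone; for reads it invalidates */
	dma_unmap_single(dma, len, DMA_TO_DEVICE);
}

This is the pattern the macb, denali, tmio, dwc3 and sdhci conversions below follow: the cache maintenance is hidden behind the map/unmap pair instead of open-coded flush/invalidate calls.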


@ -0,0 +1 @@
/* SPDX-License-Identifier: GPL-2.0-only */


@ -11,10 +11,9 @@
#include <asm/cache.h>
#include <cpu_func.h>
#include <linux/dma-direction.h>
#include <linux/types.h>
#include <malloc.h>
#define dma_mapping_error(x, y) 0
static inline void *dma_alloc_coherent(size_t len, unsigned long *handle)
{
*handle = (unsigned long)memalign(ARCH_DMA_MINALIGN, ROUND(len, ARCH_DMA_MINALIGN));
@ -26,30 +25,4 @@ static inline void dma_free_coherent(void *addr)
free(addr);
}
static inline unsigned long dma_map_single(volatile void *vaddr, size_t len,
enum dma_data_direction dir)
{
unsigned long addr = (unsigned long)vaddr;
len = ALIGN(len, ARCH_DMA_MINALIGN);
if (dir == DMA_FROM_DEVICE)
invalidate_dcache_range(addr, addr + len);
else
flush_dcache_range(addr, addr + len);
return addr;
}
static inline void dma_unmap_single(volatile void *vaddr, size_t len,
enum dma_data_direction dir)
{
unsigned long addr = (unsigned long)vaddr;
len = ALIGN(len, ARCH_DMA_MINALIGN);
if (dir != DMA_TO_DEVICE)
invalidate_dcache_range(addr, addr + len);
}
#endif /* __ASM_ARM_DMA_MAPPING_H */


@ -0,0 +1 @@
/* SPDX-License-Identifier: GPL-2.0-only */


@ -0,0 +1 @@
/* SPDX-License-Identifier: GPL-2.0-only */


@ -0,0 +1 @@
/* SPDX-License-Identifier: GPL-2.0-only */


@ -10,6 +10,7 @@
#include <asm/cache.h>
#include <cpu_func.h>
#include <linux/dma-direction.h>
#include <linux/types.h>
#include <malloc.h>
static void *dma_alloc_coherent(size_t len, unsigned long *handle)
@ -18,30 +19,4 @@ static void *dma_alloc_coherent(size_t len, unsigned long *handle)
return (void *)*handle;
}
static inline unsigned long dma_map_single(volatile void *vaddr, size_t len,
enum dma_data_direction dir)
{
unsigned long addr = (unsigned long)vaddr;
len = ALIGN(len, ARCH_DMA_MINALIGN);
if (dir == DMA_FROM_DEVICE)
invalidate_dcache_range(addr, addr + len);
else
flush_dcache_range(addr, addr + len);
return addr;
}
static inline void dma_unmap_single(volatile void *vaddr, size_t len,
enum dma_data_direction dir)
{
unsigned long addr = (unsigned long)vaddr;
len = ALIGN(len, ARCH_DMA_MINALIGN);
if (dir != DMA_TO_DEVICE)
invalidate_dcache_range(addr, addr + len);
}
#endif /* __ASM_NDS_DMA_MAPPING_H */


@ -0,0 +1 @@
/* SPDX-License-Identifier: GPL-2.0-only */


@ -10,13 +10,12 @@
#define __ASM_RISCV_DMA_MAPPING_H
#include <common.h>
#include <linux/types.h>
#include <asm/cache.h>
#include <cpu_func.h>
#include <linux/dma-direction.h>
#include <malloc.h>
#define dma_mapping_error(x, y) 0
static inline void *dma_alloc_coherent(size_t len, unsigned long *handle)
{
*handle = (unsigned long)memalign(ARCH_DMA_MINALIGN, len);
@ -28,30 +27,4 @@ static inline void dma_free_coherent(void *addr)
free(addr);
}
static inline unsigned long dma_map_single(volatile void *vaddr, size_t len,
enum dma_data_direction dir)
{
unsigned long addr = (unsigned long)vaddr;
len = ALIGN(len, ARCH_DMA_MINALIGN);
if (dir == DMA_FROM_DEVICE)
invalidate_dcache_range(addr, addr + len);
else
flush_dcache_range(addr, addr + len);
return addr;
}
static inline void dma_unmap_single(volatile void *vaddr, size_t len,
enum dma_data_direction dir)
{
unsigned long addr = (unsigned long)vaddr;
len = ALIGN(len, ARCH_DMA_MINALIGN);
if (dir != DMA_TO_DEVICE)
invalidate_dcache_range(addr, addr + len);
}
#endif /* __ASM_RISCV_DMA_MAPPING_H */


@ -0,0 +1 @@
/* SPDX-License-Identifier: GPL-2.0-only */


@ -0,0 +1 @@
/* SPDX-License-Identifier: GPL-2.0-only */


@ -11,10 +11,9 @@
#include <asm/cache.h>
#include <cpu_func.h>
#include <linux/dma-direction.h>
#include <linux/types.h>
#include <malloc.h>
#define dma_mapping_error(x, y) 0
static inline void *dma_alloc_coherent(size_t len, unsigned long *handle)
{
*handle = (unsigned long)memalign(ARCH_DMA_MINALIGN, len);
@ -26,30 +25,4 @@ static inline void dma_free_coherent(void *addr)
free(addr);
}
static inline unsigned long dma_map_single(volatile void *vaddr, size_t len,
enum dma_data_direction dir)
{
unsigned long addr = (unsigned long)vaddr;
len = ALIGN(len, ARCH_DMA_MINALIGN);
if (dir == DMA_FROM_DEVICE)
invalidate_dcache_range(addr, addr + len);
else
flush_dcache_range(addr, addr + len);
return addr;
}
static inline void dma_unmap_single(volatile void *vaddr, size_t len,
enum dma_data_direction dir)
{
unsigned long addr = (unsigned long)vaddr;
len = ALIGN(len, ARCH_DMA_MINALIGN);
if (dir != DMA_TO_DEVICE)
invalidate_dcache_range(addr, addr + len);
}
#endif /* __ASM_X86_DMA_MAPPING_H */


@ -0,0 +1 @@
/* SPDX-License-Identifier: GPL-2.0-only */


@ -10,7 +10,7 @@
#include <asm/io.h>
#include <asm/bitops.h>
#include <malloc.h>
#include <asm/dma-mapping.h>
#include <linux/dma-mapping.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/devres.h>


@ -578,6 +578,18 @@ static int esdhc_set_ios_common(struct fsl_esdhc_priv *priv, struct mmc *mmc)
return 0;
}
static void esdhc_enable_cache_snooping(struct fsl_esdhc *regs)
{
#ifdef CONFIG_ARCH_MPC830X
immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
sysconf83xx_t *sysconf = &immr->sysconf;
setbits_be32(&sysconf->sdhccr, 0x02000000);
#else
esdhc_write32(&regs->esdhcctl, 0x00000040);
#endif
}
static int esdhc_init_common(struct fsl_esdhc_priv *priv, struct mmc *mmc)
{
struct fsl_esdhc *regs = priv->esdhc_regs;
@ -593,8 +605,7 @@ static int esdhc_init_common(struct fsl_esdhc_priv *priv, struct mmc *mmc)
return -ETIMEDOUT;
}
/* Enable cache snooping */
esdhc_write32(&regs->esdhcctl, 0x00000040);
esdhc_enable_cache_snooping(regs);
esdhc_setbits32(&regs->sysctl, SYSCTL_HCKEN | SYSCTL_IPGEN);


@ -24,10 +24,6 @@
#define DEFAULT_CMD6_TIMEOUT_MS 500
static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
static int mmc_power_cycle(struct mmc *mmc);
#if !CONFIG_IS_ENABLED(MMC_TINY)
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
#endif
#if !CONFIG_IS_ENABLED(DM_MMC)
@ -1126,9 +1122,11 @@ int mmc_hwpart_config(struct mmc *mmc,
ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
#if CONFIG_IS_ENABLED(MMC_WRITE)
/* update erase group size to be high-capacity */
mmc->erase_grp_size =
ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
#endif
}
@ -2575,7 +2573,7 @@ static int mmc_startup(struct mmc *mmc)
err = mmc_get_capabilities(mmc);
if (err)
return err;
mmc_select_mode_and_width(mmc, mmc->card_caps);
err = mmc_select_mode_and_width(mmc, mmc->card_caps);
}
#endif
if (err)


@ -11,6 +11,7 @@
#include <common.h>
#include <memalign.h>
#include <mmc.h>
#include <sdhci.h>
#include <u-boot/sha256.h>
#include "mmc_private.h"
@ -91,6 +92,7 @@ static int mmc_rpmb_request(struct mmc *mmc, const struct s_rpmb *s,
{
struct mmc_cmd cmd = {0};
struct mmc_data data;
struct sdhci_host *host = mmc->priv;
int ret;
ret = mmc_set_blockcount(mmc, count, is_rel_write);
@ -105,6 +107,9 @@ static int mmc_rpmb_request(struct mmc *mmc, const struct s_rpmb *s,
cmd.cmdarg = 0;
cmd.resp_type = MMC_RSP_R1b;
if (host->quirks & SDHCI_QUIRK_BROKEN_R1B)
cmd.resp_type = MMC_RSP_R1;
data.src = (const char *)s;
data.blocks = 1;
data.blocksize = MMC_MAX_BLOCK_LEN;


@ -171,6 +171,7 @@ static int sdhci_cdns_set_tune_val(struct sdhci_cdns_plat *plat,
{
void __iomem *reg = plat->hrs_addr + SDHCI_CDNS_HRS06;
u32 tmp;
int i, ret;
if (WARN_ON(!FIELD_FIT(SDHCI_CDNS_HRS06_TUNE, val)))
return -EINVAL;
@ -178,11 +179,23 @@ static int sdhci_cdns_set_tune_val(struct sdhci_cdns_plat *plat,
tmp = readl(reg);
tmp &= ~SDHCI_CDNS_HRS06_TUNE;
tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_TUNE, val);
tmp |= SDHCI_CDNS_HRS06_TUNE_UP;
writel(tmp, reg);
return readl_poll_timeout(reg, tmp, !(tmp & SDHCI_CDNS_HRS06_TUNE_UP),
1);
/*
* Workaround for IP errata:
* The IP6116 SD/eMMC PHY design has a timing issue on receive data
* path. Send tune request twice.
*/
for (i = 0; i < 2; i++) {
tmp |= SDHCI_CDNS_HRS06_TUNE_UP;
writel(tmp, reg);
ret = readl_poll_timeout(reg, tmp,
!(tmp & SDHCI_CDNS_HRS06_TUNE_UP), 1);
if (ret)
return ret;
}
return 0;
}
static int __maybe_unused sdhci_cdns_execute_tuning(struct udevice *dev,


@ -15,12 +15,7 @@
#include <mmc.h>
#include <sdhci.h>
#include <dm.h>
#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
void *aligned_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
#else
void *aligned_buffer;
#endif
#include <linux/dma-mapping.h>
static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
@ -71,8 +66,8 @@ static void sdhci_transfer_pio(struct sdhci_host *host, struct mmc_data *data)
}
#if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
static void sdhci_adma_desc(struct sdhci_host *host, char *buf, u16 len,
bool end)
static void sdhci_adma_desc(struct sdhci_host *host, dma_addr_t dma_addr,
u16 len, bool end)
{
struct sdhci_adma_desc *desc;
u8 attr;
@ -88,9 +83,9 @@ static void sdhci_adma_desc(struct sdhci_host *host, char *buf, u16 len,
desc->attr = attr;
desc->len = len;
desc->reserved = 0;
desc->addr_lo = (dma_addr_t)buf;
desc->addr_lo = lower_32_bits(dma_addr);
#ifdef CONFIG_DMA_ADDR_T_64BIT
desc->addr_hi = (u64)buf >> 32;
desc->addr_hi = upper_32_bits(dma_addr);
#endif
}
@ -100,22 +95,17 @@ static void sdhci_prepare_adma_table(struct sdhci_host *host,
uint trans_bytes = data->blocksize * data->blocks;
uint desc_count = DIV_ROUND_UP(trans_bytes, ADMA_MAX_LEN);
int i = desc_count;
char *buf;
dma_addr_t dma_addr = host->start_addr;
host->desc_slot = 0;
if (data->flags & MMC_DATA_READ)
buf = data->dest;
else
buf = (char *)data->src;
while (--i) {
sdhci_adma_desc(host, buf, ADMA_MAX_LEN, false);
buf += ADMA_MAX_LEN;
sdhci_adma_desc(host, dma_addr, ADMA_MAX_LEN, false);
dma_addr += ADMA_MAX_LEN;
trans_bytes -= ADMA_MAX_LEN;
}
sdhci_adma_desc(host, buf, trans_bytes, true);
sdhci_adma_desc(host, dma_addr, trans_bytes, true);
flush_cache((dma_addr_t)host->adma_desc_table,
ROUND(desc_count * sizeof(struct sdhci_adma_desc),
@ -131,11 +121,12 @@ static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
int *is_aligned, int trans_bytes)
{
unsigned char ctrl;
void *buf;
if (data->flags == MMC_DATA_READ)
host->start_addr = (dma_addr_t)data->dest;
buf = data->dest;
else
host->start_addr = (dma_addr_t)data->src;
buf = (void *)data->src;
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
ctrl &= ~SDHCI_CTRL_DMA_MASK;
@ -145,37 +136,30 @@ static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
ctrl |= SDHCI_CTRL_ADMA32;
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
if (host->flags & USE_SDMA) {
if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
(host->start_addr & 0x7) != 0x0) {
*is_aligned = 0;
host->start_addr = (unsigned long)aligned_buffer;
if (data->flags != MMC_DATA_READ)
memcpy(aligned_buffer, data->src, trans_bytes);
}
#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
/*
* Always use this bounce-buffer when
* CONFIG_FIXED_SDHCI_ALIGNED_BUFFER is defined
*/
if (host->flags & USE_SDMA &&
(host->force_align_buffer ||
(host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR &&
((unsigned long)buf & 0x7) != 0x0))) {
*is_aligned = 0;
host->start_addr = (unsigned long)aligned_buffer;
if (data->flags != MMC_DATA_READ)
memcpy(aligned_buffer, data->src, trans_bytes);
#endif
sdhci_writel(host, host->start_addr, SDHCI_DMA_ADDRESS);
memcpy(host->align_buffer, buf, trans_bytes);
buf = host->align_buffer;
}
host->start_addr = dma_map_single(buf, trans_bytes,
mmc_get_dma_dir(data));
if (host->flags & USE_SDMA) {
sdhci_writel(host, host->start_addr, SDHCI_DMA_ADDRESS);
} else if (host->flags & (USE_ADMA | USE_ADMA64)) {
sdhci_prepare_adma_table(host, data);
sdhci_writel(host, (u32)host->adma_addr, SDHCI_ADMA_ADDRESS);
sdhci_writel(host, lower_32_bits(host->adma_addr),
SDHCI_ADMA_ADDRESS);
if (host->flags & USE_ADMA64)
sdhci_writel(host, (u64)host->adma_addr >> 32,
sdhci_writel(host, upper_32_bits(host->adma_addr),
SDHCI_ADMA_ADDRESS_HI);
}
flush_cache(host->start_addr, ROUND(trans_bytes, ARCH_DMA_MINALIGN));
}
#else
static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
@ -231,6 +215,10 @@ static int sdhci_transfer_data(struct sdhci_host *host, struct mmc_data *data)
return -ETIMEDOUT;
}
} while (!(stat & SDHCI_INT_DATA_END));
dma_unmap_single(host->start_addr, data->blocks * data->blocksize,
mmc_get_dma_dir(data));
return 0;
}
@ -381,7 +369,7 @@ static int sdhci_send_command(struct mmc *mmc, struct mmc_cmd *cmd,
if (!ret) {
if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
!is_aligned && (data->flags == MMC_DATA_READ))
memcpy(data->dest, aligned_buffer, trans_bytes);
memcpy(data->dest, host->align_buffer, trans_bytes);
return 0;
}
@ -537,7 +525,7 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
void sdhci_set_uhs_timing(struct sdhci_host *host)
{
struct mmc *mmc = (struct mmc *)host->mmc;
struct mmc *mmc = host->mmc;
u32 reg;
reg = sdhci_readw(host, SDHCI_HOST_CONTROL2);
@ -630,14 +618,23 @@ static int sdhci_init(struct mmc *mmc)
sdhci_reset(host, SDHCI_RESET_ALL);
if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && !aligned_buffer) {
aligned_buffer = memalign(8, 512*1024);
if (!aligned_buffer) {
#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
host->align_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
/*
* Always use this bounce-buffer when CONFIG_FIXED_SDHCI_ALIGNED_BUFFER
* is defined.
*/
host->force_align_buffer = true;
#else
if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) {
host->align_buffer = memalign(8, 512 * 1024);
if (!host->align_buffer) {
printf("%s: Aligned buffer alloc failed!!!\n",
__func__);
return -ENOMEM;
}
}
#endif
sdhci_set_power(host, fls(mmc->cfg->voltages) - 1);
@ -741,8 +738,7 @@ int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host,
__func__);
return -EINVAL;
}
host->adma_desc_table = (struct sdhci_adma_desc *)
memalign(ARCH_DMA_MINALIGN, ADMA_TABLE_SZ);
host->adma_desc_table = memalign(ARCH_DMA_MINALIGN, ADMA_TABLE_SZ);
host->adma_addr = (dma_addr_t)host->adma_desc_table;
#ifdef CONFIG_DMA_ADDR_T_64BIT


@ -4,7 +4,6 @@
* Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*/
#include <asm/dma-mapping.h>
#include <common.h>
#include <clk.h>
#include <cpu_func.h>
@ -14,7 +13,7 @@
#include <dm/device_compat.h>
#include <dm/pinctrl.h>
#include <linux/compat.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <power/regulator.h>
@ -353,7 +352,7 @@ static int tmio_sd_dma_xfer(struct udevice *dev, struct mmc_data *data)
if (poll_flag == TMIO_SD_DMA_INFO1_END_RD)
udelay(1);
dma_unmap_single(buf, len, dir);
dma_unmap_single(dma_addr, len, dir);
return ret;
}


@ -5,14 +5,13 @@
* Copyright (C) 2009-2010, Intel Corporation and its suppliers.
*/
#include <asm/dma-mapping.h>
#include <dm.h>
#include <malloc.h>
#include <nand.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bitfield.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/io.h>
@ -581,7 +580,7 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
iowrite32(0, denali->reg + DMA_ENABLE);
dma_unmap_single(buf, size, dir);
dma_unmap_single(dma_addr, size, dir);
if (irq_status & INTR__ERASED_PAGE)
memset(buf, 0xff, size);


@ -17,7 +17,7 @@
#include <miiphy.h>
#include <net.h>
#include <asm/cache.h>
#include <asm/dma-mapping.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include "altera_tse.h"


@ -12,7 +12,7 @@
#include <net.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/dma-mapping.h>
#include <linux/dma-mapping.h>
#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
#include <miiphy.h>


@ -38,7 +38,7 @@
#include <linux/mii.h>
#include <asm/io.h>
#include <asm/dma-mapping.h>
#include <linux/dma-mapping.h>
#include <asm/arch/clk.h>
#include <linux/errno.h>
@ -342,7 +342,7 @@ static int _macb_send(struct macb_device *macb, const char *name, void *packet,
udelay(1);
}
dma_unmap_single(packet, length, DMA_TO_DEVICE);
dma_unmap_single(paddr, length, DMA_TO_DEVICE);
if (i <= MACB_TX_TIMEOUT) {
if (ctrl & MACB_BIT(TX_UNDERRUN))


@ -9,7 +9,6 @@
#include <cpu_func.h>
#include <asm/io.h>
#include <malloc.h>
#include <asm/dma-mapping.h>
#include <asm/bitops.h>
#include <dm.h>
#include <dm/device_compat.h>
@ -17,6 +16,7 @@
#include <dm/read.h>
#include <dm/uclass.h>
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>


@ -19,7 +19,7 @@
#include <hexdump.h>
#include <scsi.h>
#include <asm/dma-mapping.h>
#include <linux/dma-mapping.h>
#include "ufs.h"


@ -63,7 +63,7 @@
#include <linux/usb/gadget.h>
#include <linux/compat.h>
#include <linux/iopoll.h>
#include <asm/dma-mapping.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/bug.h>


@ -17,9 +17,9 @@
#include <cpu_func.h>
#include <malloc.h>
#include <dwc3-uboot.h>
#include <asm/dma-mapping.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <dm.h>
@ -288,8 +288,8 @@ static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
return 0;
err1:
dma_unmap_single((void *)(uintptr_t)dwc->scratch_addr, dwc->nr_scratch *
DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
dma_unmap_single(scratch_addr, dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE,
DMA_BIDIRECTIONAL);
err0:
return ret;
@ -303,7 +303,7 @@ static void dwc3_free_scratch_buffers(struct dwc3 *dwc)
if (!dwc->nr_scratch)
return;
dma_unmap_single((void *)(uintptr_t)dwc->scratch_addr, dwc->nr_scratch *
dma_unmap_single(dwc->scratch_addr, dwc->nr_scratch *
DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
kfree(dwc->scratchbuf);
}


@ -16,10 +16,10 @@
#include <common.h>
#include <cpu_func.h>
#include <malloc.h>
#include <asm/dma-mapping.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/usb/ch9.h>


@ -18,7 +18,7 @@
#include <linux/compat.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm/dma-mapping.h>
#include <linux/dma-mapping.h>
#include <common.h>
#include <dm.h>
#include <dm/device-internal.h>
@ -67,7 +67,7 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget,
if (req->length == 0)
return;
dma_unmap_single((void *)(uintptr_t)req->dma, req->length,
dma_unmap_single(req->dma, req->length,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);


@ -0,0 +1,63 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H
#include <linux/dma-direction.h>
#include <linux/types.h>
#include <asm/dma-mapping.h>
#include <cpu_func.h>
#define dma_mapping_error(x, y) 0
/**
* Map a buffer to make it available to the DMA device
*
* Linux-like DMA API that is intended to be used from drivers. This hides the
* underlying cache operation from drivers. Call this before starting the DMA
* transfer. On most architectures in U-Boot the virtual address matches the
* physical address (sandbox is a notable exception), and U-Boot does not
* support an IOMMU at the driver level, so it also matches the DMA address.
* Hence, this helper currently just performs the cache operation, then returns
* the straight-mapped DMA address, which is intended to be written to the
* DMA device's address register.
*
* @vaddr: address of the buffer
* @len: length of the buffer
* @dir: the direction of DMA
*/
static inline dma_addr_t dma_map_single(void *vaddr, size_t len,
enum dma_data_direction dir)
{
unsigned long addr = (unsigned long)vaddr;
len = ALIGN(len, ARCH_DMA_MINALIGN);
if (dir == DMA_FROM_DEVICE)
invalidate_dcache_range(addr, addr + len);
else
flush_dcache_range(addr, addr + len);
return addr;
}
/**
* Unmap a buffer to make it available to the CPU
*
* Linux-like DMA API that is intended to be used from drivers. This hides the
* underlying cache operation from drivers. Call this after finishing the DMA
* transfer.
*
* @addr: DMA address
* @len: length of the buffer
* @dir: the direction of DMA
*/
static inline void dma_unmap_single(dma_addr_t addr, size_t len,
enum dma_data_direction dir)
{
len = ALIGN(len, ARCH_DMA_MINALIGN);
if (dir != DMA_TO_DEVICE)
invalidate_dcache_range(addr, addr + len);
}
#endif
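Putting the two helpers together for the read direction, which is where the cache-coherency fix in the sdhci driver matters, a hedged sketch of the intended usage (the function is illustrative; mmc_get_dma_dir() is the helper added to <mmc.h> further down in this diff):

#include <linux/dma-mapping.h>
#include <mmc.h>

/* Hypothetical read-path fragment mirroring sdhci_prepare_dma()/sdhci_transfer_data() */
static void example_read_transfer(struct mmc_data *data)
{
	size_t len = data->blocks * data->blocksize;
	dma_addr_t dma;

	/* DMA_FROM_DEVICE: invalidate so no dirty lines are evicted over the incoming data */
	dma = dma_map_single(data->dest, len, mmc_get_dma_dir(data));

	/* ... program 'dma' into the controller and run the transfer ... */

	/* Invalidate again so the CPU reads what the device wrote, not stale cache lines */
	dma_unmap_single(dma, len, mmc_get_dma_dir(data));
}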


@ -12,6 +12,7 @@
#include <linux/list.h>
#include <linux/sizes.h>
#include <linux/compiler.h>
#include <linux/dma-direction.h>
#include <part.h>
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
@ -880,4 +881,9 @@ int mmc_get_env_dev(void);
*/
struct blk_desc *mmc_get_blk_desc(struct mmc *mmc);
static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
{
return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
#endif /* _MMC_H_ */


@ -9,6 +9,7 @@
#ifndef __SDHCI_HW_H
#define __SDHCI_HW_H
#include <linux/types.h>
#include <asm/io.h>
#include <mmc.h>
#include <asm/gpio.h>
@ -321,6 +322,8 @@ struct sdhci_host {
uint voltages;
struct mmc_config cfg;
void *align_buffer;
bool force_align_buffer;
dma_addr_t start_addr;
int flags;
#define USE_SDMA (0x1 << 0)