// SPDX-License-Identifier: GPL-2.0+
/*
 * Generic bounce buffer implementation
 *
 * Copyright (C) 2012 Marek Vasut <marex@denx.de>
 */

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <malloc.h>
#include <errno.h>
#include <bouncebuf.h>
#include <asm/cache.h>
#include <linux/dma-mapping.h>

/*
 * Check whether the user buffer already satisfies the minimum DMA
 * alignment, so the transfer can use it directly without bouncing.
 */
static int addr_aligned(struct bounce_buffer *state)
{
	const ulong align_mask = ARCH_DMA_MINALIGN - 1;

	/* Check if start is aligned */
	if ((ulong)state->user_buffer & align_mask) {
		debug("Unaligned buffer address %p\n", state->user_buffer);
		return 0;
	}

	/* Check if length is aligned */
	if (state->len != state->len_aligned) {
		debug("Unaligned buffer length %zu\n", state->len);
		return 0;
	}

	/* Aligned */
	return 1;
}

/*
 * Begin a transfer with a caller-supplied alignment and alignment
 * predicate. If the user buffer already satisfies the predicate it is
 * used directly; otherwise an aligned bounce buffer is allocated, and
 * pre-filled from the user buffer for GEN_BB_READ (device-reads-memory)
 * transfers.
 */
int bounce_buffer_start_extalign(struct bounce_buffer *state, void *data,
				 size_t len, unsigned int flags,
				 size_t alignment,
				 int (*addr_is_aligned)(struct bounce_buffer *state))
{
	state->user_buffer = data;
	state->bounce_buffer = data;
	state->len = len;
	state->len_aligned = roundup(len, alignment);
	state->flags = flags;

	if (!addr_is_aligned(state)) {
		state->bounce_buffer = memalign(alignment,
						state->len_aligned);
		if (!state->bounce_buffer)
			return -ENOMEM;

		if (state->flags & GEN_BB_READ)
			memcpy(state->bounce_buffer, state->user_buffer,
			       state->len);
	}

	/*
	 * Flush data to RAM so DMA reads can pick it up,
	 * and any CPU writebacks don't race with DMA writes
	 */
	dma_map_single(state->bounce_buffer,
		       state->len_aligned,
		       DMA_BIDIRECTIONAL);

	return 0;
}
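
/*
 * Illustrative sketch, not part of the original file: a controller that
 * needs stricter alignment than ARCH_DMA_MINALIGN (say, 512 bytes) can
 * pass its own alignment and predicate to bounce_buffer_start_extalign().
 * The constraint value and function names below are made up for the
 * example, so the block is compiled out.
 */
#if 0
static int addr_is_512b_aligned(struct bounce_buffer *state)
{
	if ((ulong)state->user_buffer & 511)
		return 0;	/* start not 512-byte aligned */
	if (state->len != state->len_aligned)
		return 0;	/* length not a multiple of 512 */
	return 1;
}

static int example_start(struct bounce_buffer *bb, void *buf, size_t len)
{
	/* Bounce into a 512-byte-aligned allocation when needed */
	return bounce_buffer_start_extalign(bb, buf, len, GEN_BB_READ,
					    512, addr_is_512b_aligned);
}
#endif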

/* Begin a transfer using the architecture's minimum DMA alignment. */
int bounce_buffer_start(struct bounce_buffer *state, void *data,
			size_t len, unsigned int flags)
{
	return bounce_buffer_start_extalign(state, data, len, flags,
					    ARCH_DMA_MINALIGN,
					    addr_aligned);
}

/*
 * Finish a transfer: make DMA'd data visible to the CPU, copy it back
 * to the user buffer if a bounce buffer was used, and free the bounce
 * buffer.
 */
int bounce_buffer_stop(struct bounce_buffer *state)
{
	if (state->flags & GEN_BB_WRITE) {
		/* Invalidate cache so that CPU can see any newly DMA'd data */
		dma_unmap_single((dma_addr_t)(uintptr_t)state->bounce_buffer,
				 state->len_aligned,
				 DMA_BIDIRECTIONAL);
	}

	if (state->bounce_buffer == state->user_buffer)
		return 0;

	if (state->flags & GEN_BB_WRITE)
		memcpy(state->user_buffer, state->bounce_buffer, state->len);

	free(state->bounce_buffer);

	return 0;
}
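
/*
 * Illustrative sketch, not part of the original file: how a driver that
 * must DMA into a caller-supplied buffer of arbitrary alignment would
 * typically bracket the transfer. start_dma_and_wait() is a hypothetical
 * driver hook used only for the example, so the block is compiled out.
 */
#if 0
static int example_dma_read(void *buf, size_t len)
{
	struct bounce_buffer bb;
	int ret;

	/* GEN_BB_WRITE: the DMA engine will write into the buffer */
	ret = bounce_buffer_start(&bb, buf, len, GEN_BB_WRITE);
	if (ret)
		return ret;

	/* DMA must target bb.bounce_buffer for bb.len_aligned bytes */
	ret = start_dma_and_wait(bb.bounce_buffer, bb.len_aligned);

	/* Copy DMA'd data back to buf (if bounced) and free the bounce */
	bounce_buffer_stop(&bb);

	return ret;
}
#endif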