virtio: Use bounce buffers when VIRTIO_F_IOMMU_PLATFORM is set

Devices advertising the VIRTIO_F_IOMMU_PLATFORM feature require
platform-specific handling to configure their DMA transactions.

When handling virtio descriptors for such a device, use bounce
buffers to ensure that the underlying buffers are always aligned
to and padded to PAGE_SIZE in preparation for platform specific
handling at page granularity.

Signed-off-by: Will Deacon <willdeacon@google.com>
[ Paul: picked from the Android tree and rebased onto upstream ]
Signed-off-by: Ying-Chun Liu (PaulLiu) <paul.liu@linaro.org>
Cc: Bin Meng <bmeng.cn@gmail.com>
Link: 1eff171e61
Reviewed-by: Simon Glass <sjg@chromium.org>
This commit is contained in:
Will Deacon 2023-03-29 22:25:00 +08:00 committed by Tom Rini
parent 37e53db38b
commit b0a2fe148a

View file

@ -28,14 +28,51 @@ static void virtio_free_pages(struct udevice *vdev, void *ptr, u32 npages)
free(ptr); free(ptr);
} }
/*
 * __bb_force_page_align() - alignment predicate for bounce_buffer_start_extalign().
 *
 * Returns 1 only when the caller's buffer already starts on a PAGE_SIZE
 * boundary AND its length needs no alignment padding (len == len_aligned);
 * returns 0 otherwise. NOTE(review): presumably a zero return tells the
 * bounce-buffer core that the buffer is unsuitable as-is, forcing a bounce
 * copy — confirm against the callback contract in bounce_buf.h.
 */
static int __bb_force_page_align(struct bounce_buffer *state)
{
	const ulong page_mask = PAGE_SIZE - 1;
	int start_aligned = !((ulong)state->user_buffer & page_mask);
	int len_unpadded = (state->len == state->len_aligned);

	return start_aligned && len_unpadded;
}
static unsigned int virtqueue_attach_desc(struct virtqueue *vq, unsigned int i, static unsigned int virtqueue_attach_desc(struct virtqueue *vq, unsigned int i,
struct virtio_sg *sg, u16 flags) struct virtio_sg *sg, u16 flags)
{ {
struct vring_desc_shadow *desc_shadow = &vq->vring_desc_shadow[i]; struct vring_desc_shadow *desc_shadow = &vq->vring_desc_shadow[i];
struct vring_desc *desc = &vq->vring.desc[i]; struct vring_desc *desc = &vq->vring.desc[i];
void *addr;
if (IS_ENABLED(CONFIG_BOUNCE_BUFFER) && vq->vring.bouncebufs) {
struct bounce_buffer *bb = &vq->vring.bouncebufs[i];
unsigned int bbflags;
int ret;
if (flags & VRING_DESC_F_WRITE)
bbflags = GEN_BB_WRITE;
else
bbflags = GEN_BB_READ;
ret = bounce_buffer_start_extalign(bb, sg->addr, sg->length,
bbflags, PAGE_SIZE,
__bb_force_page_align);
if (ret) {
debug("%s: failed to allocate bounce buffer (length 0x%zx)\n",
vq->vdev->name, sg->length);
}
addr = bb->bounce_buffer;
} else {
addr = sg->addr;
}
/* Update the shadow descriptor. */ /* Update the shadow descriptor. */
desc_shadow->addr = (u64)(uintptr_t)sg->addr; desc_shadow->addr = (u64)(uintptr_t)addr;
desc_shadow->len = sg->length; desc_shadow->len = sg->length;
desc_shadow->flags = flags; desc_shadow->flags = flags;
@ -50,6 +87,15 @@ static unsigned int virtqueue_attach_desc(struct virtqueue *vq, unsigned int i,
static void virtqueue_detach_desc(struct virtqueue *vq, unsigned int idx) static void virtqueue_detach_desc(struct virtqueue *vq, unsigned int idx)
{ {
struct vring_desc *desc = &vq->vring.desc[idx];
struct bounce_buffer *bb;
if (!IS_ENABLED(CONFIG_BOUNCE_BUFFER) || !vq->vring.bouncebufs)
return;
bb = &vq->vring.bouncebufs[idx];
bounce_buffer_stop(bb);
desc->addr = cpu_to_virtio64(vq->vdev, (u64)(uintptr_t)bb->user_buffer);
} }
int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[], int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[],