dma: ti: k3-udma: Add support for native configuration of chan/flow

In the absence of Device Manager (DM) services, such as at the R5 SPL stage,
the driver has to natively set up the TCHAN/RCHAN/RFLOW cfg registers.
Add support for this.

Note that we still need to send chan/flow cfg message to TIFS via TISCI
client driver in order to open up firewalls around chan/flow but setting
up of cfg registers is handled locally.

U-Boot-specific code is kept in a separate file that is included in the main
driver so as to maintain similarity with the kernel driver, in order to ease
porting of code in the future.

Signed-off-by: Vignesh Raghavendra <vigneshr@ti.com>
Signed-off-by: Lokesh Vutla <lokeshvutla@ti.com>
Link: https://lore.kernel.org/r/20210607141753.28796-8-vigneshr@ti.com
This commit is contained in:
Vignesh Raghavendra 2021-06-07 19:47:53 +05:30 committed by Lokesh Vutla
parent 86e58800fd
commit 5abb694d60
2 changed files with 215 additions and 4 deletions

View file

@ -0,0 +1,177 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com
 */

/* Shift for the flow-ID count field in the RCHAN flow-range register */
#define UDMA_RCHAN_RFLOW_RNG_FLOWID_CNT_SHIFT (16)

/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
#define UDMA_RFLOW_SRCTAG_NONE 0
#define UDMA_RFLOW_SRCTAG_CFG_TAG 1
#define UDMA_RFLOW_SRCTAG_FLOW_ID 2
#define UDMA_RFLOW_SRCTAG_SRC_TAG 4

#define UDMA_RFLOW_DSTTAG_NONE 0
#define UDMA_RFLOW_DSTTAG_CFG_TAG 1
#define UDMA_RFLOW_DSTTAG_FLOW_ID 2
#define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
#define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5

/*
 * Default RFC tag selection: leave SRC tag hi untouched, take SRC tag lo
 * from the packet's SRC tag, and DST tag hi/lo from the packet's DST tag
 * hi/lo fields.
 * NOTE(review): UDMA_RFLOW_RFC_DST_TAG_LO_SE_SHIFT is spelled "_SE_"
 * while its siblings use "_SEL_" — the macro is defined in a header not
 * visible here; confirm it is the intended name (it likely mirrors the
 * kernel driver's register header).
 */
#define UDMA_RFLOW_RFC_DEFAULT \
((UDMA_RFLOW_SRCTAG_NONE << UDMA_RFLOW_RFC_SRC_TAG_HI_SEL_SHIFT) | \
(UDMA_RFLOW_SRCTAG_SRC_TAG << UDMA_RFLOW_RFC_SRC_TAG_LO_SEL_SHIFT) | \
(UDMA_RFLOW_DSTTAG_DST_TAG_HI << UDMA_RFLOW_RFC_DST_TAG_HI_SEL_SHIFT) | \
(UDMA_RFLOW_DSTTAG_DST_TAG_LO << UDMA_RFLOW_RFC_DST_TAG_LO_SE_SHIFT))

/*
 * Shift used to place a second queue number in the upper 16 bits of the
 * RFLOW RFD/RFE/RFH registers (see udma_alloc_rchan_raw() usage).
 */
#define UDMA_RFLOW_RFx_REG_FDQ_SIZE_SHIFT (16)
/* TCHAN */
/* Read a TCHAN cfg register; a NULL channel reads back as 0. */
static inline u32 udma_tchan_read(struct udma_tchan *tchan, int reg)
{
	return tchan ? udma_read(tchan->reg_chan, reg) : 0;
}
/* Write a TCHAN cfg register; a NULL channel is a no-op. */
static inline void udma_tchan_write(struct udma_tchan *tchan, int reg, u32 val)
{
	if (tchan)
		udma_write(tchan->reg_chan, reg, val);
}
/* Read-modify-write the masked bits of a TCHAN cfg register; NULL is a no-op. */
static inline void udma_tchan_update_bits(struct udma_tchan *tchan, int reg,
					  u32 mask, u32 val)
{
	if (tchan)
		udma_update_bits(tchan->reg_chan, reg, mask, val);
}
/* RCHAN */
/* Read an RCHAN cfg register; a NULL channel reads back as 0. */
static inline u32 udma_rchan_read(struct udma_rchan *rchan, int reg)
{
	return rchan ? udma_read(rchan->reg_chan, reg) : 0;
}
/* Write an RCHAN cfg register; a NULL channel is a no-op. */
static inline void udma_rchan_write(struct udma_rchan *rchan, int reg, u32 val)
{
	if (rchan)
		udma_write(rchan->reg_chan, reg, val);
}
/* Read-modify-write the masked bits of an RCHAN cfg register; NULL is a no-op. */
static inline void udma_rchan_update_bits(struct udma_rchan *rchan, int reg,
					  u32 mask, u32 val)
{
	if (rchan)
		udma_update_bits(rchan->reg_chan, reg, mask, val);
}
/* RFLOW */
/* Read an RFLOW cfg register; a NULL flow reads back as 0. */
static inline u32 udma_rflow_read(struct udma_rflow *rflow, int reg)
{
	return rflow ? udma_read(rflow->reg_rflow, reg) : 0;
}
/* Write an RFLOW cfg register; a NULL flow is a no-op. */
static inline void udma_rflow_write(struct udma_rflow *rflow, int reg, u32 val)
{
	if (rflow)
		udma_write(rflow->reg_rflow, reg, val);
}
/* Read-modify-write the masked bits of an RFLOW cfg register; NULL is a no-op. */
static inline void udma_rflow_update_bits(struct udma_rflow *rflow, int reg,
					  u32 mask, u32 val)
{
	if (rflow)
		udma_update_bits(rflow->reg_rflow, reg, mask, val);
}
/*
 * Natively program the TX channel static cfg registers (TCFG/TCQ).
 * Used when Device Manager services are absent (e.g. R5 SPL); the TISCI
 * tx_ch_cfg call still runs separately to open the firewalls.
 */
static void udma_alloc_tchan_raw(struct udma_chan *uc)
{
	u32 chan_type = uc->config.pkt_mode ?
			UDMA_CHAN_CFG_CHAN_TYPE_PACKET_PBRR :
			UDMA_CHAN_CFG_CHAN_TYPE_3RDP_BC_PBRR;
	u32 fetch_words;

	udma_tchan_update_bits(uc->tchan, UDMA_TCHAN_TCFG_REG,
			       UDMA_CHAN_CFG_CHAN_TYPE_MASK, chan_type);

	/* Descriptor fetch size is expressed in 32-bit words (hence >> 2) */
	if (uc->config.dir == DMA_MEM_TO_MEM)
		fetch_words = sizeof(struct cppi5_desc_hdr_t) >> 2;
	else
		fetch_words = cppi5_hdesc_calc_size(uc->config.needs_epib,
						    uc->config.psd_size, 0) >> 2;

	udma_tchan_update_bits(uc->tchan, UDMA_TCHAN_TCFG_REG,
			       UDMA_CHAN_CFG_FETCH_SIZE_MASK, fetch_words);

	/* Route TX completions to the tc ring */
	udma_tchan_write(uc->tchan, UDMA_TCHAN_TCQ_REG,
			 k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring));
}
/*
 * Natively program the RX channel cfg registers (RCFG/RCQ) and its RX
 * flow registers. Used when Device Manager services are absent (e.g. R5
 * SPL); the TISCI rx_ch_cfg/rx_flow_cfg calls still run separately to
 * open the firewalls — only the cfg-register writes are mirrored here.
 */
static void udma_alloc_rchan_raw(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int fd_ring = k3_nav_ringacc_get_ring_id(uc->rflow->fd_ring);
	int rx_ring = k3_nav_ringacc_get_ring_id(uc->rflow->r_ring);
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	u32 rx_einfo_present = 0, rx_psinfo_present = 0;
	u32 mode, fetch_size, rxcq_num;

	/* Packet vs third-party/block-copy pass-by-reference channel type */
	if (uc->config.pkt_mode)
		mode = UDMA_CHAN_CFG_CHAN_TYPE_PACKET_PBRR;
	else
		mode = UDMA_CHAN_CFG_CHAN_TYPE_3RDP_BC_PBRR;

	udma_rchan_update_bits(uc->rchan, UDMA_RCHAN_RCFG_REG,
			       UDMA_CHAN_CFG_CHAN_TYPE_MASK, mode);

	/*
	 * Fetch size is in 32-bit words (>> 2). MEM_TO_MEM uses the bare
	 * descriptor header and completes on the paired TX tc ring.
	 */
	if (uc->config.dir == DMA_MEM_TO_MEM) {
		fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		rxcq_num = tc_ring;
	} else {
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0) >> 2;
		rxcq_num = rx_ring;
	}

	udma_rchan_update_bits(uc->rchan, UDMA_RCHAN_RCFG_REG,
			       UDMA_CHAN_CFG_FETCH_SIZE_MASK, fetch_size);
	udma_rchan_write(uc->rchan, UDMA_RCHAN_RCQ_REG, rxcq_num);

	/* MEM_TO_MEM transfers need no RX flow programming */
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return;

	/*
	 * Point the channel's flow range at the single flow (count = 1 in
	 * the upper field) when it differs from the default (flow id ==
	 * channel id). NOTE(review): the dir != DMA_MEM_TO_MEM clause is
	 * always true here because of the early return above — presumably
	 * kept to stay in sync with the kernel driver; confirm.
	 */
	if (ud->match_data->type == DMA_TYPE_UDMA &&
	    uc->rflow->id != uc->rchan->id &&
	    uc->config.dir != DMA_MEM_TO_MEM)
		udma_rchan_write(uc->rchan, UDMA_RCHAN_RFLOW_RNG_REG, uc->rflow->id |
				 1 << UDMA_RCHAN_RFLOW_RNG_FLOWID_CNT_SHIFT);

	if (uc->config.needs_epib)
		rx_einfo_present = UDMA_RFLOW_RFA_EINFO;

	if (uc->config.psd_size)
		rx_psinfo_present = UDMA_RFLOW_RFA_PSINFO;

	/* RFA: EPIB/PS-info presence flags plus the RX completion queue */
	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(A),
			 rx_einfo_present | rx_psinfo_present | rxcq_num);
	/* RFC: SRC/DST tag selection for descriptor Word 3 */
	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(C), UDMA_RFLOW_RFC_DEFAULT);
	/*
	 * All free-descriptor queue selections below point at the single
	 * fd_ring, packed two per register via the 16-bit shift —
	 * presumably FDQ0..FDQ3 queue numbers; confirm against the TRM
	 * RFLOW register map.
	 */
	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(D),
			 fd_ring | fd_ring << UDMA_RFLOW_RFx_REG_FDQ_SIZE_SHIFT);
	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(E),
			 fd_ring | fd_ring << UDMA_RFLOW_RFx_REG_FDQ_SIZE_SHIFT);
	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(G), fd_ring);
	udma_rflow_write(uc->rflow, UDMA_RFLOW_REG(H),
			 fd_ring | fd_ring << UDMA_RFLOW_RFx_REG_FDQ_SIZE_SHIFT);
}

View file

@ -48,6 +48,9 @@ enum udma_mmr {
MMR_BCHANRT, MMR_BCHANRT,
MMR_RCHANRT, MMR_RCHANRT,
MMR_TCHANRT, MMR_TCHANRT,
MMR_RCHAN,
MMR_TCHAN,
MMR_RFLOW,
MMR_LAST, MMR_LAST,
}; };
@ -56,9 +59,13 @@ static const char * const mmr_names[] = {
[MMR_BCHANRT] = "bchanrt", [MMR_BCHANRT] = "bchanrt",
[MMR_RCHANRT] = "rchanrt", [MMR_RCHANRT] = "rchanrt",
[MMR_TCHANRT] = "tchanrt", [MMR_TCHANRT] = "tchanrt",
[MMR_RCHAN] = "rchan",
[MMR_TCHAN] = "tchan",
[MMR_RFLOW] = "rflow",
}; };
struct udma_tchan { struct udma_tchan {
void __iomem *reg_chan;
void __iomem *reg_rt; void __iomem *reg_rt;
int id; int id;
@ -71,12 +78,14 @@ struct udma_tchan {
#define udma_bchan udma_tchan #define udma_bchan udma_tchan
struct udma_rflow { struct udma_rflow {
void __iomem *reg_rflow;
int id; int id;
struct k3_nav_ring *fd_ring; /* Free Descriptor ring */ struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
struct k3_nav_ring *r_ring; /* Receive ring */ struct k3_nav_ring *r_ring; /* Receive ring */
}; };
struct udma_rchan { struct udma_rchan {
void __iomem *reg_chan;
void __iomem *reg_rt; void __iomem *reg_rt;
int id; int id;
@ -335,6 +344,8 @@ static inline char *udma_get_dir_text(enum dma_direction dir)
return "invalid"; return "invalid";
} }
#include "k3-udma-u-boot.c"
static void udma_reset_uchan(struct udma_chan *uc) static void udma_reset_uchan(struct udma_chan *uc)
{ {
memset(&uc->config, 0, sizeof(uc->config)); memset(&uc->config, 0, sizeof(uc->config));
@ -1014,12 +1025,22 @@ static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
req.txcq_qnum = tc_ring; req.txcq_qnum = tc_ring;
ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req); ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
if (ret) if (ret) {
dev_err(ud->dev, "tisci tx alloc failed %d\n", ret); dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);
return ret; return ret;
} }
/*
* Above TI SCI call handles firewall configuration, cfg
* register configuration still has to be done locally in
* absence of RM services.
*/
if (IS_ENABLED(CONFIG_K3_DM_FW))
udma_alloc_tchan_raw(uc);
return 0;
}
static int udma_alloc_rchan_sci_req(struct udma_chan *uc) static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
{ {
struct udma_dev *ud = uc->ud; struct udma_dev *ud = uc->ud;
@ -1114,13 +1135,23 @@ static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
&flow_req); &flow_req);
if (ret) if (ret) {
dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n", dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
uc->rchan->id, uc->rflow->id, ret); uc->rchan->id, uc->rflow->id, ret);
return ret; return ret;
} }
/*
* Above TI SCI call handles firewall configuration, cfg
* register configuration still has to be done locally in
* absence of RM services.
*/
if (IS_ENABLED(CONFIG_K3_DM_FW))
udma_alloc_rchan_raw(uc);
return 0;
}
static int udma_alloc_chan_resources(struct udma_chan *uc) static int udma_alloc_chan_resources(struct udma_chan *uc)
{ {
struct udma_dev *ud = uc->ud; struct udma_dev *ud = uc->ud;
@ -1751,6 +1782,7 @@ static int udma_probe(struct udevice *dev)
struct udma_tchan *tchan = &ud->tchans[i]; struct udma_tchan *tchan = &ud->tchans[i];
tchan->id = i; tchan->id = i;
tchan->reg_chan = ud->mmrs[MMR_TCHAN] + UDMA_CH_100(i);
tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i); tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
} }
@ -1758,6 +1790,7 @@ static int udma_probe(struct udevice *dev)
struct udma_rchan *rchan = &ud->rchans[i]; struct udma_rchan *rchan = &ud->rchans[i];
rchan->id = i; rchan->id = i;
rchan->reg_chan = ud->mmrs[MMR_RCHAN] + UDMA_CH_100(i);
rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i); rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
} }
@ -1765,6 +1798,7 @@ static int udma_probe(struct udevice *dev)
struct udma_rflow *rflow = &ud->rflows[i]; struct udma_rflow *rflow = &ud->rflows[i];
rflow->id = i; rflow->id = i;
rflow->reg_rflow = ud->mmrs[MMR_RFLOW] + UDMA_CH_40(i);
} }
for (i = 0; i < ud->ch_count; i++) { for (i = 0; i < ud->ch_count; i++) {