author     Jai Luthra                2024-04-29 05:36:55 -0500
committer  Praneeth Bajjuri          2024-04-29 16:31:30 -0500
commit     67d5b24398155d07376b04dd8b3d858d049a5e17 (patch)
tree       f3539f6df7ec11496b1c8b23daab185a286bfba9
parent     5b06a16aa6a4646cafeefc2ed94e2e4a2b24429c (diff)
dmaengine: ti: k3-udma: Fix teardown for cyclic RX with PDMA
When receiving data in cyclic mode from PDMA peripherals, where the
reload count is set to infinite, any TR in the set can potentially be
the last one of the overall transfer. In such cases, the EOP flag needs
to be set in each TR, and PDMA's Static TR "Z" parameter should be set
to match the size of the TR.
This is required for the teardown to function properly and to clean up
the internal state memory. It only affects platforms using BCDMA, not
those using UDMA-P, which can set the EOP flag in the teardown TR
automatically.
Signed-off-by: Jai Luthra <j-luthra@ti.com>
Reviewed-by: Kamlesh Gurudasani <kamlesh@ti.com>
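
As a rough illustration of the Static TR "Z" sizing rule described above, the
standalone C model below computes the burst count (bstcnt) the way the patched
udma_configure_statictr() does for cyclic TR-mode BCDMA RX, i.e. Z =
(icnt0 * icnt1) / dev_width, and range-checks it against statictr_z_mask. This
is a sketch for illustration only, not driver code: the helper name
statictr_z_for_cyclic_rx() is made up, and the example mask value 0xfff simply
mirrors the "maximum 4095" limit quoted in the old error string.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Model of the new cyclic-RX case: the Static TR "Z" parameter is sized
     * from one TR (icnt0 * icnt1 bytes) divided by the element width, then
     * checked against the platform's statictr_z_mask.
     */
    static int statictr_z_for_cyclic_rx(uint16_t icnt0, uint16_t icnt1,
                                        unsigned int dev_width,
                                        uint32_t statictr_z_mask,
                                        uint32_t *bstcnt)
    {
            *bstcnt = ((uint32_t)icnt0 * icnt1) / dev_width;

            /* DEV_TO_MEM: reject values the Static TR Z field cannot hold */
            if (*bstcnt > statictr_z_mask)
                    return -1;

            return 0;
    }

    int main(void)
    {
            uint32_t z;

            /* Example: 256-byte TR periods, 4-byte elements, 12-bit Z field */
            if (statictr_z_for_cyclic_rx(64, 4, 4, 0xfff, &z))
                    printf("StaticTR Z out of range\n");
            else
                    printf("StaticTR Z (bstcnt) = %u\n", (unsigned int)z);

            return 0;
    }

Because every TR in the infinite-reload set now closes a packet of exactly one
TR's worth of data, whichever TR happens to be last before teardown leaves no
partial packet behind in PDMA.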
-rw-r--r--  drivers/dma/ti/k3-udma.c  58
1 file changed, 35 insertions(+), 23 deletions(-)
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 468e5b86eecb..bd590a8ff55d 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -1016,14 +1016,9 @@ static int udma_stop(struct udma_chan *uc)
 		if (!uc->cyclic && !uc->desc)
 			udma_push_to_ring(uc, -1);
 
-		/* FIXME: Doing a forced teardown for McASP PDMA to prevent channel corruption */
-		if (uc->cyclic && uc->config.ep_type == PSIL_EP_PDMA_XY &&
-		    uc->config.enable_acc32 && uc->config.enable_burst)
-			udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
-		else
-			udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
-					   UDMA_PEER_RT_EN_ENABLE |
-					   UDMA_PEER_RT_EN_TEARDOWN);
+		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
+				   UDMA_PEER_RT_EN_ENABLE |
+				   UDMA_PEER_RT_EN_TEARDOWN);
 		break;
 	case DMA_MEM_TO_DEV:
 		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
@@ -3200,27 +3195,39 @@ static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
 
 	d->static_tr.elcnt = elcnt;
 
-	/*
-	 * PDMA must to close the packet when the channel is in packet mode.
-	 * For TR mode when the channel is not cyclic we also need PDMA to close
-	 * the packet otherwise the transfer will stall because PDMA holds on
-	 * the data it has received from the peripheral.
-	 */
 	if (uc->config.pkt_mode || !uc->cyclic) {
+		/*
+		 * PDMA must close the packet when the channel is in packet mode.
+		 * For TR mode when the channel is not cyclic we also need PDMA
+		 * to close the packet otherwise the transfer will stall because
+		 * PDMA holds on the data it has received from the peripheral.
+		 */
 		unsigned int div = dev_width * elcnt;
 
 		if (uc->cyclic)
 			d->static_tr.bstcnt = d->residue / d->sglen / div;
 		else
 			d->static_tr.bstcnt = d->residue / div;
+	} else if (uc->ud->match_data->type == DMA_TYPE_BCDMA &&
+		   uc->config.dir == DMA_DEV_TO_MEM && !uc->config.pkt_mode &&
+		   uc->cyclic) {
+		/*
+		 * For cyclic TR mode PDMA must close the packet after every TR
+		 * transfer, as we have to set EOP in each TR to prevent short
+		 * packet errors seen on channel teardown.
+		 */
+		struct cppi5_tr_type1_t *tr_req = d->hwdesc[0].tr_req_base;
 
-		if (uc->config.dir == DMA_DEV_TO_MEM &&
-		    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
-			return -EINVAL;
+		d->static_tr.bstcnt =
+			(tr_req->icnt0 * tr_req->icnt1) / dev_width;
 	} else {
 		d->static_tr.bstcnt = 0;
 	}
 
+	if (uc->config.dir == DMA_DEV_TO_MEM &&
+	    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -3465,8 +3472,9 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	/* static TR for remote PDMA */
 	if (udma_configure_statictr(uc, d, dev_width, burst)) {
 		dev_err(uc->ud->dev,
-			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
-			__func__, d->static_tr.bstcnt);
+			"%s: StaticTR Z is limited to maximum %u (%u)\n",
+			__func__, uc->ud->match_data->statictr_z_mask,
+			d->static_tr.bstcnt);
 
 		udma_free_hwdesc(uc, d);
 		kfree(d);
@@ -3541,8 +3549,11 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
 
 		if (!(flags & DMA_PREP_INTERRUPT))
 			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
-					 CPPI5_TR_CSF_SUPR_EVT);
-
+					 CPPI5_TR_CSF_SUPR_EVT |
+					 CPPI5_TR_CSF_EOP);
+		else
+			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
+					 CPPI5_TR_CSF_EOP);
 		period_addr += period_len;
 	}
 
@@ -3670,8 +3681,9 @@ udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 	/* static TR for remote PDMA */
 	if (udma_configure_statictr(uc, d, dev_width, burst)) {
 		dev_err(uc->ud->dev,
-			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
-			__func__, d->static_tr.bstcnt);
+			"%s: StaticTR Z is limited to maximum %u (%u)\n",
+			__func__, uc->ud->match_data->statictr_z_mask,
+			d->static_tr.bstcnt);
 
 		udma_free_hwdesc(uc, d);
 		kfree(d);
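
To make the per-TR flag handling in udma_prep_dma_cyclic_tr() easier to follow,
here is a small, compilable model of the new logic: every TR in the cyclic set
gets EOP so PDMA closes the packet after each period, while event suppression
is still applied only when the caller did not request DMA_PREP_INTERRUPT. Only
the flag names come from the diff; the bit values, the tr_flags struct and the
tr_csf_set() helper are placeholders for illustration, not the real CPPI5
definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in flag bits; the real definitions live in the CPPI5 headers. */
    #define CPPI5_TR_CSF_SUPR_EVT   (1u << 0)   /* placeholder bit value */
    #define CPPI5_TR_CSF_EOP        (1u << 1)   /* placeholder bit value */
    #define DMA_PREP_INTERRUPT      (1u << 0)   /* placeholder bit value */

    struct tr_flags {
            uint32_t csf;
    };

    /* Stand-in for cppi5_tr_csf_set(): OR the given CSF bits into the TR flags. */
    static void tr_csf_set(struct tr_flags *flags, uint32_t csf)
    {
            flags->csf |= csf;
    }

    int main(void)
    {
            struct tr_flags tr[4] = {{ 0 }};
            unsigned long flags = 0;    /* caller did not ask for DMA_PREP_INTERRUPT */
            unsigned int i;

            /*
             * Mirror of the new per-TR logic: EOP on every TR in the cyclic
             * set, event suppression only when no interrupt is wanted.
             */
            for (i = 0; i < 4; i++) {
                    if (!(flags & DMA_PREP_INTERRUPT))
                            tr_csf_set(&tr[i], CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
                    else
                            tr_csf_set(&tr[i], CPPI5_TR_CSF_EOP);

                    printf("TR%u csf flags: 0x%x\n", i, (unsigned int)tr[i].csf);
            }

            return 0;
    }

In the previous code only the teardown path could close the packet, which
worked for UDMA-P but left BCDMA channels with stale PDMA state; setting EOP on
every TR makes teardown clean regardless of which TR was in flight.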