author     Jai Luthra    2024-05-30 06:30:58 -0500
committer  Udit Kumar    2024-05-31 05:52:54 -0500
commit     e0a0ce8c2684c13fab0e65be767d036dfa592ee1
tree       117c6512d89e88041e1452e7d740ab5d2acde3f1
parent     4065fab087a5903f726a62ba7ed13cbcf0543cc2
dmaengine: ti: k3-udma: Fix teardown for cyclic PDMA transfers
When receiving data in cyclic mode from PDMA peripherals, where reload
count is set to infinite, any TR in the set can potentially be the last
one of the overall transfer. In such cases, the EOP flag needs to be set
in each TR and PDMA's Static TR "Z" parameter should be set, matching
the size of the TR.
This is required for the teardown to function properly and to clean up the
internal state memory. It only affects platforms using BCDMA, not those
using UDMA-P, which can set the EOP flag in the teardown TR automatically.
Similarly, when transmitting data in cyclic mode to PDMA peripherals, the
EOP flag needs to be set so that the teardown completion signal is received
correctly.
Signed-off-by: Jai Luthra <j-luthra@ti.com>
-rw-r--r--  drivers/dma/ti/k3-udma.c | 61
1 file changed, 46 insertions(+), 15 deletions(-)
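For readers who do not want to parse the diff below, here is a minimal,
self-contained model of the per-period flag handling the patch introduces in
udma_prep_dma_cyclic_tr(). The struct chan_cfg type, the bit values of the
two CPPI5_TR_CSF_* macros, and the helper name period_csf_flags() are
stand-ins invented for this sketch; only the decision logic mirrors the patch.

/*
 * Standalone model of the per-period CSF flag selection.  The macros and
 * the config struct are stand-ins so the snippet compiles on its own; in
 * the kernel the values come from the cppi5 and k3-udma headers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CPPI5_TR_CSF_SUPR_EVT (1u << 0)   /* stand-in bit values */
#define CPPI5_TR_CSF_EOP      (1u << 1)

struct chan_cfg {
	bool is_bcdma;        /* uc->ud->match_data->type == DMA_TYPE_BCDMA */
	bool is_pdma_xy;      /* uc->config.ep_type == PSIL_EP_PDMA_XY */
	bool prep_interrupt;  /* flags & DMA_PREP_INTERRUPT */
};

/* Compute the CSF flags applied to every TR of every period. */
static uint32_t period_csf_flags(const struct chan_cfg *cfg)
{
	uint32_t period_csf = 0;

	/*
	 * BCDMA <-> PDMA: any period may be the last one of the transfer,
	 * so EOP is set on each period's TR (see the commit message above).
	 */
	if (cfg->is_pdma_xy && cfg->is_bcdma)
		period_csf |= CPPI5_TR_CSF_EOP;

	/* Event suppression is folded into the same flag word. */
	if (!cfg->prep_interrupt)
		period_csf |= CPPI5_TR_CSF_SUPR_EVT;

	return period_csf;
}

int main(void)
{
	struct chan_cfg cfg = { .is_bcdma = true, .is_pdma_xy = true,
				.prep_interrupt = true };

	printf("period_csf = 0x%x\n", period_csf_flags(&cfg));
	return 0;
}

Folding EOP and SUPR_EVT into a single period_csf word lets the driver apply
both with one cppi5_tr_csf_set() call per TR, which is why the original
two-argument call inside the loop is replaced in the patch below.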
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 710296dfd0ae..fc60e345edbd 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -3185,27 +3185,39 @@ static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
 
 	d->static_tr.elcnt = elcnt;
 
-	/*
-	 * PDMA must to close the packet when the channel is in packet mode.
-	 * For TR mode when the channel is not cyclic we also need PDMA to close
-	 * the packet otherwise the transfer will stall because PDMA holds on
-	 * the data it has received from the peripheral.
-	 */
 	if (uc->config.pkt_mode || !uc->cyclic) {
+		/*
+		 * PDMA must close the packet when the channel is in packet mode.
+		 * For TR mode when the channel is not cyclic we also need PDMA
+		 * to close the packet otherwise the transfer will stall because
+		 * PDMA holds on the data it has received from the peripheral.
+		 */
 		unsigned int div = dev_width * elcnt;
 
 		if (uc->cyclic)
 			d->static_tr.bstcnt = d->residue / d->sglen / div;
 		else
 			d->static_tr.bstcnt = d->residue / div;
+	} else if (uc->ud->match_data->type == DMA_TYPE_BCDMA &&
+		   uc->config.dir == DMA_DEV_TO_MEM && !uc->config.pkt_mode &&
+		   uc->cyclic) {
+		/*
+		 * For cyclic TR mode PDMA must close the packet after every TR
+		 * transfer, as we have to set EOP in each TR to prevent short
+		 * packet errors seen on channel teardown.
+		 */
+		struct cppi5_tr_type1_t *tr_req = d->hwdesc[0].tr_req_base;
 
-		if (uc->config.dir == DMA_DEV_TO_MEM &&
-		    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
-			return -EINVAL;
+		d->static_tr.bstcnt =
+			(tr_req->icnt0 * tr_req->icnt1) / dev_width;
 	} else {
 		d->static_tr.bstcnt = 0;
 	}
 
+	if (uc->config.dir == DMA_DEV_TO_MEM &&
+	    d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -3450,8 +3462,9 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	/* static TR for remote PDMA */
 	if (udma_configure_statictr(uc, d, dev_width, burst)) {
 		dev_err(uc->ud->dev,
-			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
-			__func__, d->static_tr.bstcnt);
+			"%s: StaticTR Z is limited to maximum %u (%u)\n",
+			__func__, uc->ud->match_data->statictr_z_mask,
+			d->static_tr.bstcnt);
 
 		udma_free_hwdesc(uc, d);
 		kfree(d);
@@ -3476,6 +3489,7 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
 	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
 	unsigned int i;
 	int num_tr;
+	u32 period_csf = 0;
 
 	num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
 				      &tr0_cnt1, &tr1_cnt0);
@@ -3498,6 +3512,20 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
 		period_addr = buf_addr |
 			      ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
 
+	/*
+	 * For BCDMA <-> PDMA transfers, the EOP flag needs to be set on the
+	 * last TR of a descriptor, to mark the packet as complete.
+	 * This is required for getting the teardown completion message in case
+	 * of TX, and to avoid short-packet error in case of RX.
+	 *
+	 * As we are in cyclic mode, we do not know which period might be the
+	 * last one, so set the flag for each period.
+	 */
+	if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
+	    uc->ud->match_data->type == DMA_TYPE_BCDMA) {
+		period_csf = CPPI5_TR_CSF_EOP;
+	}
+
 	for (i = 0; i < periods; i++) {
 		int tr_idx = i * num_tr;
 
@@ -3525,8 +3553,10 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
 		}
 
 		if (!(flags & DMA_PREP_INTERRUPT))
-			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
-					 CPPI5_TR_CSF_SUPR_EVT);
+			period_csf |= CPPI5_TR_CSF_SUPR_EVT;
+
+		if (period_csf)
+			cppi5_tr_csf_set(&tr_req[tr_idx].flags, period_csf);
 
 		period_addr += period_len;
 	}
@@ -3655,8 +3685,9 @@ udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 	/* static TR for remote PDMA */
 	if (udma_configure_statictr(uc, d, dev_width, burst)) {
 		dev_err(uc->ud->dev,
-			"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
-			__func__, d->static_tr.bstcnt);
+			"%s: StaticTR Z is limited to maximum %u (%u)\n",
+			__func__, uc->ud->match_data->statictr_z_mask,
+			d->static_tr.bstcnt);
 
 		udma_free_hwdesc(uc, d);
 		kfree(d);
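Similarly, a condensed, self-contained model of the reworked Static TR "Z"
(bstcnt) selection in udma_configure_statictr(). The struct xfer type, the
STATICTR_Z_MASK value (the old error message hard-coded 4095), and the helper
name static_tr_bstcnt() are stand-ins for illustration; the branch structure
and the relocated range check follow the hunk at @@ -3185,27 +3185,39 @@.

/*
 * Standalone model of the bstcnt ("Static TR Z") selection.  Types and
 * the mask value are stand-ins; the real code operates on struct
 * udma_chan / struct udma_desc.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STATICTR_Z_MASK 0xfffu	/* stand-in for match_data->statictr_z_mask */

struct xfer {
	bool pkt_mode, cyclic, bcdma, dev_to_mem;
	uint32_t residue, sglen, dev_width, elcnt;
	uint16_t icnt0, icnt1;	/* first TR's counts (cyclic BCDMA case) */
};

/* Returns bstcnt, or -1 if the Z limit would be exceeded (-EINVAL). */
static int32_t static_tr_bstcnt(const struct xfer *x)
{
	uint32_t bstcnt;

	if (x->pkt_mode || !x->cyclic) {
		uint32_t div = x->dev_width * x->elcnt;

		bstcnt = x->cyclic ? x->residue / x->sglen / div
				   : x->residue / div;
	} else if (x->bcdma && x->dev_to_mem) {
		/* Cyclic TR mode on BCDMA: Z must match one TR's size. */
		bstcnt = (x->icnt0 * x->icnt1) / x->dev_width;
	} else {
		bstcnt = 0;
	}

	/* The range check now runs for every DEV_TO_MEM path. */
	if (x->dev_to_mem && bstcnt > STATICTR_Z_MASK)
		return -1;

	return (int32_t)bstcnt;
}

int main(void)
{
	struct xfer x = { .cyclic = true, .bcdma = true, .dev_to_mem = true,
			  .dev_width = 2, .icnt0 = 64, .icnt1 = 1 };

	printf("bstcnt = %d\n", static_tr_bstcnt(&x));
	return 0;
}

Moving the Z-mask range check out of the packet-mode branch means the new
cyclic BCDMA path is validated against statictr_z_mask as well, which is also
why the dev_err() messages now print the actual mask instead of the
hard-coded 4095.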