author	Ralf Goebel	2018-08-01 09:50:26 -0500
committer	Suman Anna	2018-08-10 11:14:14 -0500
commit	eda0921e873781918859e0b59c0d6f8e4d7cc566 (patch)
tree	1e2ffc06e3703d2129343a26ebc49aba19146ffa
parent	a313fc4938b89488859ec26878bd266cc123fa9b (diff)
iommu/omap: Fix cache flushes on L2 page table entries (iommu-linux-4.9.y)
The flush_iopte_range() function is used to flush programmed L1/L2 page table entries from the cache. It takes a base DMA address, an offset from that base, and a number of entries, and computes from them the address and size of the region to flush.

The base address used for DMA operations on a second-level table incorrectly included the offset of the table entry. The L2 page table entry offset was then added again inside flush_iopte_range(), so the cache was flushed for the wrong memory locations. Even though the entries themselves were programmed correctly, this triggered occasional MMU faults when the corresponding address was accessed on the remote processor side: the hardware page table walk fetched an incorrect translation. Operations on the L1 table are not affected.

Fix this by changing the base address to point to the beginning of the L2 page table.

Fixes: a313fc4938b8 ("iommu/omap: Use DMA-API for performing cache flushes")
Acked-by: Suman Anna <s-anna@ti.com>
Signed-off-by: Ralf Goebel <ralf.goebel@imago-technologies.com>
[s-anna@ti.com: backport linux-omap patchwork id '10552411', revise description]
Signed-off-by: Suman Anna <s-anna@ti.com>
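For context, here is a minimal user-space sketch of the address arithmetic the fix corrects. It assumes flush_iopte_range() effectively syncs the region starting at base + offset (the behaviour introduced by a313fc4938b8); the table-layout constants and the addresses below are hypothetical and chosen only for illustration, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical constants mirroring an OMAP second-level table layout:
 * 256 u32 entries, one per 4 KiB page. */
#define IOPTE_SHIFT	12
#define PTRS_PER_IOPTE	256

static uint32_t iopte_index(uint32_t da)
{
	return (da >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1);
}

int main(void)
{
	/* Hypothetical addresses, for illustration only. */
	uint32_t l2_table_pa = 0x9f000000;	/* start of the L2 page table   */
	uint32_t da = 0x00042000;		/* device virtual address       */
	uint32_t offset = iopte_index(da) * sizeof(uint32_t);

	/* Before the fix: the base handed to flush_iopte_range() already
	 * points at the entry (table base + offset), so adding the offset
	 * again inside the flush targets the wrong cache lines. */
	uint32_t buggy_base = l2_table_pa + offset;
	printf("buggy flush starts at 0x%08x\n", (unsigned)(buggy_base + offset));

	/* After the fix: the base is the start of the L2 table, so
	 * base + offset lands exactly on the programmed entry. */
	printf("fixed flush starts at 0x%08x\n", (unsigned)(l2_table_pa + offset));
	return 0;
}

With these example values the buggy path flushes from 0x9f000210 while the entry actually written sits at 0x9f000108, which is why the remote processor could still walk a stale translation.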
 drivers/iommu/omap-iommu.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index ecf55aebbcad..5e539880f0e3 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -557,7 +557,7 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
 
 pte_ready:
 	iopte = iopte_offset(iopgd, da);
-	*pt_dma = virt_to_phys(iopte);
+	*pt_dma = iopgd_page_paddr(iopgd);
 	dev_vdbg(obj->dev,
 		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
 		 __func__, da, iopgd, *iopgd, iopte, *iopte);
@@ -745,7 +745,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
 	}
 	bytes *= nent;
 	memset(iopte, 0, nent * sizeof(*iopte));
-	pt_dma = virt_to_phys(iopte);
+	pt_dma = iopgd_page_paddr(iopgd);
 	flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);
 
 	/*