author    Venkateswara Rao Mandela <venkat.mandela@ti.com>    Mon, 16 Sep 2019 20:13:37 +0000 (20:13 +0000)
committer Suman Anna <s-anna@ti.com>                          Mon, 23 Sep 2019 01:47:32 +0000 (20:47 -0500)
The remoteproc module has a concept of "late attach" whereby a remote
core is loaded by an external entity and remoteproc must attach to the
core without disrupting its existing state. Introduce an IOMMU-based
"late attach" model for the same use case.

In the "late attach" model, the IOMMU subsystem is mostly unused since
the external loader will have programmed the remote core's MMU, but
certain "attach" functionality must still be performed so that the
subsequent "detach" functionality can complete.
Late attach is detected in the driver through a "ti,late-attach"
property set on the IOMMU node in the device tree. The IOMMU node should
also have the "ti,no-reset-on-init" and "ti,no-idle-on-init" properties
so that the omap_hwmod and omap_device layers do not reset and
idle/disable the device during the initial kernel boot. The
"ti,late-attach" property is removed from the device tree on the first
probe so that subsequent probes or remoteproc recovery boots treat the
IOMMU device normally (sketched after the sign-offs below).
Signed-off-by: Venkateswara Rao Mandela <venkat.mandela@ti.com>
Signed-off-by: Angela Stegmaier <angelabaker@ti.com>
Signed-off-by: Subash Lakkimsetti <x0091084@ti.com>
Signed-off-by: Suman Anna <s-anna@ti.com>
Signed-off-by: Shravan Karthik <shravan.karthik@ti.com>
Signed-off-by: Keerthy <j-keerthy@ti.com>
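
The one-shot "ti,late-attach" handling described above is not part of
the hunks shown on this page (these hunks detect late attach through the
pdata->device_is_enabled() callback instead). As a rough sketch of what
such probe-time handling could look like, assuming the standard
of_find_property()/of_remove_property() helpers and a hypothetical
check_late_attach() function:

    #include <linux/of.h>

    /* sketch: detect a late-attach boot once, then scrub the marker so
     * later probes and remoteproc recovery boots take the normal path */
    static bool check_late_attach(struct device_node *of)
    {
            struct property *prop;

            prop = of_find_property(of, "ti,late-attach", NULL);
            if (!prop)
                    return false;

            of_remove_property(of, prop);
            return true;
    }
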
index 4abc0ef522a8ef80445947e19ccd21afda3df6f2..72954b2894acb006848a4347eb4c8965ffdea3e0 100644 (file)
continue;
}
- iopte = iopte_offset(iopgd, 0);
+ iopte = iopte_get(obj, iopgd, 0);
for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) {
if (!*iopte)
continue;
index c69d8690052073e841062de9cd8558809630628d..b52ba5717e8e37a2a27f1e3714883a92f73bf636 100644 (file)
/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+/*
+ * Total size of the L1 and L2 page tables reserved/used by the bootloader
+ * per remote processor for early-boot use cases. This must match the
+ * value used in the bootloader.
+ */
+#define EARLY_PAGE_TABLES_SIZE SZ_256K
+
#define MMU_LOCK_BASE_SHIFT 10
#define MMU_LOCK_BASE_MASK (0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x) \
if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
return -EINVAL;
- pa = virt_to_phys(obj->iopgd);
+ pa = obj->iopgd_pa;
if (!IS_ALIGNED(pa, SZ_16K))
return -EINVAL;
{
int ret;
+ /*
+ * Now that the threat of idling has passed, decrement the device
+ * usage count to balance the increment done in probe. The PM
+ * runtime usage count is managed normally from here on.
+ */
+ if (obj->late_attach)
+ pm_runtime_put_noidle(obj->dev);
+
ret = pm_runtime_get_sync(obj->dev);
if (ret < 0)
pm_runtime_put_noidle(obj->dev);
}
pte_ready:
- iopte = iopte_offset(iopgd, da);
+ iopte = iopte_get(obj, iopgd, da);
*pt_dma = iopgd_page_paddr(iopgd);
dev_vdbg(obj->dev,
"%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
goto out;
if (iopgd_is_table(*iopgd))
- iopte = iopte_offset(iopgd, da);
+ iopte = iopte_get(obj, iopgd, da);
out:
*ppgd = iopgd;
*ppte = iopte;
if (iopgd_is_table(*iopgd)) {
int i;
- u32 *iopte = iopte_offset(iopgd, da);
+ u32 *iopte = iopte_get(obj, iopgd, da);
bytes = IOPTE_SIZE;
if (*iopte & IOPTE_LARGE) {
nent *= 16;
/* rewind to the 1st entry */
- iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
+ iopte = iopte_get(obj, iopgd, (da & IOLARGE_MASK));
}
bytes *= nent;
memset(iopte, 0, nent * sizeof(*iopte));
/*
* do table walk to check if this table is necessary or not
*/
- iopte = iopte_offset(iopgd, 0);
+ iopte = iopte_get(obj, iopgd, 0);
+
for (i = 0; i < PTRS_PER_IOPTE; i++)
if (iopte[i])
goto out;
if (!*iopgd)
continue;
- if (iopgd_is_table(*iopgd))
- iopte_free(obj, iopte_offset(iopgd, 0), true);
+ if (iopgd_is_table(*iopgd)) {
+ if (obj->late_attach)
+ iopte_free(obj, iopte_offset_lateattach(obj, iopgd, 0), true);
+ else
+ iopte_free(obj, iopte_offset(iopgd, 0), true);
+ }
*iopgd = 0;
flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
return IRQ_NONE;
}
- iopte = iopte_offset(iopgd, da);
+ iopte = iopte_get(obj, iopgd, da);
dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
{
int err;
+ u32 iopgd_pa;
+
+ if (obj->late_attach) {
+ iopgd_pa = iommu_read_reg(obj, MMU_TTB);
+ iopgd = ioremap(iopgd_pa, EARLY_PAGE_TABLES_SIZE);
+ if (!iopgd)
+ return -ENOMEM;
+ } else {
+ iopgd_pa = virt_to_phys(iopgd);
+ }
spin_lock(&obj->iommu_lock);
goto out_err;
}
+ obj->iopgd_pa = iopgd_pa;
obj->iopgd = iopgd;
err = iommu_enable(obj);
if (err)
goto out_err;
- flush_iotlb_all(obj);
+
+ if (!obj->late_attach)
+ flush_iotlb_all(obj);
spin_unlock(&obj->iommu_lock);
if (!obj || IS_ERR(obj))
return;
+ if (obj->late_attach && obj->iopgd)
+ iounmap(obj->iopgd);
+
spin_lock(&obj->iommu_lock);
dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
DMA_TO_DEVICE);
obj->pd_dma = 0;
+
+ obj->iopgd_pa = 0;
obj->iopgd = NULL;
iommu_disable(obj);
+ obj->late_attach = 0;
spin_unlock(&obj->iommu_lock);
}
}
- if (pdata && pdata->deassert_reset) {
+ /* for late attach, skip the reset deassert during the initial boot */
+ if ((!obj->late_attach || obj->domain) &&
+ pdata && pdata->deassert_reset) {
ret = pdata->deassert_reset(pdev, pdata->reset_name);
if (ret) {
dev_err(dev, "deassert_reset failed: %d\n", ret);
struct omap_iommu *obj;
struct resource *res;
struct device_node *of = pdev->dev.of_node;
+ struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
if (!of) {
pr_err("%s: only DT-based devices are supported\n", __func__);
obj->name = dev_name(&pdev->dev);
obj->nr_tlb_entries = 32;
err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
+
if (err && err != -EINVAL)
return err;
if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
+ if (pdata && pdata->device_is_enabled &&
+ pdata->device_is_enabled(pdev))
+ obj->late_attach = 1;
+
obj->dev = &pdev->dev;
obj->ctx = (void *)obj + sizeof(*obj);
obj->cr_ctx = devm_kzalloc(&pdev->dev,
}
pm_runtime_irq_safe(obj->dev);
+
+ /*
+ * Increment the device usage count so that runtime_suspend is not
+ * invoked immediately after probe (due to the "ti,no-idle-on-init"
+ * property) and before any remoteproc has attached to the IOMMU.
+ */
+ if (obj->late_attach)
+ pm_runtime_get_noresume(obj->dev);
+
pm_runtime_enable(obj->dev);
omap_iommu_debugfs_add(obj);
iommu = odomain->iommus;
for (i = 0; i < odomain->num_iommus; i++, iommu++) {
+ /*
+ * Allocating a page table is not necessary for late attach, since
+ * the bootloader has already set one up. The allocation below is
+ * left in place because it has no side effects during late attach.
+ */
iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
if (!iommu->pgtable)
return -ENOMEM;
arch_data += (omap_domain->num_iommus - 1);
for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
oiommu = iommu->iommu_dev;
- iopgtable_clear_entry_all(oiommu);
+ if (!oiommu->late_attach)
+ iopgtable_clear_entry_all(oiommu);
omap_iommu_detach(oiommu);
iommu->iommu_dev = NULL;
index ca07fbf287d0910f19188ab815b5a6998b5c8bf1..c83810aceb82d255fe30f931e445aa3122883a75 100644 (file)
* but share it globally for each iommu.
*/
u32 *iopgd;
+ u32 iopgd_pa;
+ u32 late_attach;
spinlock_t page_table_lock; /* protect iopgd */
dma_addr_t pd_dma;
return cr->cam & MMU_CAM_V;
}
+static inline u32 *iopte_get(struct omap_iommu *obj, u32 *iopgd, u32 da)
+{
+ if (obj->late_attach)
+ return iopte_offset_lateattach(obj, iopgd, da);
+ else
+ return iopte_offset(iopgd, da);
+}
+
#endif /* _OMAP_IOMMU_H */
index 01a315227bf052d03a0c1f72e6e4e48c6121b201..12fc64f9be5572895b921f01ee28739e4bccb9f3 100644 (file)
#define iopte_index(da) (((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1))
#define iopte_offset(iopgd, da) (iopgd_page_vaddr(iopgd) + iopte_index(da))
+/*
+ * compute vaddr for second-level page table relative to page table directory
+ * for late-attach mode
+ */
+#define iopgd_page_vaddr_lateattach(obj, pgd) \
+ ((obj)->iopgd + \
+ ((u32 *)iopgd_page_paddr(pgd) - (u32 *)(obj)->iopgd_pa))
+
+/* to find an entry in the second-level page table for late-attach mode */
+#define iopte_offset_lateattach(obj, iopgd, da) \
+ (iopgd_page_vaddr_lateattach(obj, iopgd) + iopte_index(da))
+
#endif /* _OMAP_IOPGTABLE_H */
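
These macros work because the bootloader is expected to keep all L1 and
L2 tables inside the single EARLY_PAGE_TABLES_SIZE window that
omap_iommu_attach() ioremaps, so a physical L2 table address can be
turned into a virtual one by offsetting from the remapped L1 base. A
worked sketch of the arithmetic, with made-up addresses:

    /* sketch: late-attach phys-to-virt translation with example values.
     * Assume the loader put the L1 table at physical 0x9f000000, an L2
     * table at physical 0x9f004000, and obj->iopgd points at the
     * ioremap'd 256 KiB window starting at the L1 base.
     */
    u32 l2_pa = iopgd_page_paddr(iopgd);            /* e.g. 0x9f004000 */
    ptrdiff_t off = (u32 *)l2_pa - (u32 *)obj->iopgd_pa;
                                                    /* 0x4000 bytes / 4 */
    u32 *l2_va = obj->iopgd + off;                  /* vaddr of L2 table */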