HACK: ARM: dma-mapping: create non-zeroing dma_map_ops
author		Martin Ambrose <martin@ti.com>
		Thu, 12 Sep 2019 17:45:07 +0000 (17:45 +0000)
committer	Suman Anna <s-anna@ti.com>
		Mon, 23 Sep 2019 01:46:53 +0000 (20:46 -0500)
A new set of dma_map_ops, 'arm_dma_m_ops', is created as a copy of the
standard 'arm_dma_ops', but with a new non-zeroing .alloc method.

These ops are added mainly to support a 'late attach' feature in
the OMAP remoteproc driver. When remoteproc does a 'late attach'
to a remote processor, it does not load any firmware contents into
memory, but it still needs to allocate the processor's CMA memory
so that the memory is marked as reserved/used by the kernel. The
standard 'arm_dma_ops' contains an .alloc method that zeroes out
the memory, thereby overwriting the firmware code/data that was
pre-loaded into that memory before the Linux kernel booted.

This scenario is handled by adding a new non-zeroing allocation
function and using it as the .alloc method in a copy of the
'arm_dma_ops'. The resulting 'arm_dma_m_ops' is assigned as the
rproc device's dma_ops when the 'late attach' functionality is
used, as sketched in the example below.
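
As a minimal sketch (the helper name and 'late_attach' flag below are
assumptions for illustration, not part of this patch), a driver could
install the non-zeroing ops on its device like this:

#include <linux/dma-mapping.h>	/* set_dma_ops(); arm_dma_ops via asm/dma-mapping.h */

/* Hypothetical helper: pick the non-zeroing ops for a late-attached rproc */
static void rproc_setup_dma_ops(struct device *dev, bool late_attach)
{
	if (late_attach)
		/* keep the firmware image pre-loaded into CMA before boot */
		set_dma_ops(dev, &arm_dma_m_ops);
	else
		set_dma_ops(dev, &arm_dma_ops);
}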

Signed-off-by: Martin Ambrose <martin@ti.com>
Signed-off-by: Robert Tivy <rtivy@ti.com>
Signed-off-by: Suman Anna <s-anna@ti.com>
Signed-off-by: Amarinder Bindra <a-bindra@ti.com>
Signed-off-by: Venkateswara Rao Mandela <venkat.mandela@ti.com>
Signed-off-by: Angela Stegmaier <angelabaker@ti.com>
Signed-off-by: Keerthy <j-keerthy@ti.com>
arch/arm/include/asm/dma-mapping.h
arch/arm/mm/dma-mapping.c

diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 8436f6ade57dd145a717c11aa5207854a68c25a0..b16cfe99d35f2723c4cd3752f2aa64d36c0207a6 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -14,6 +14,7 @@
 #include <asm/xen/hypervisor.h>
 
 extern const struct dma_map_ops arm_dma_ops;
+extern const struct dma_map_ops arm_dma_m_ops;
 extern const struct dma_map_ops arm_coherent_dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 66566472c15384c6eb9fc0bea6045b8c8287e972..70f6cd06e1acbf383de267a76876c9637fc8d766 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -50,6 +50,7 @@ struct arm_dma_alloc_args {
        const void *caller;
        bool want_vaddr;
        int coherent_flag;
+       bool zero;
 };
 
 struct arm_dma_free_args {
@@ -203,6 +204,27 @@ const struct dma_map_ops arm_dma_ops = {
 };
 EXPORT_SYMBOL(arm_dma_ops);
 
+static void *arm_dma_malloc(struct device *dev, size_t size, dma_addr_t *handle,
+                           gfp_t gfp, unsigned long dma_attrs);
+
+const struct dma_map_ops arm_dma_m_ops = {
+       .alloc                  = arm_dma_malloc,
+       .free                   = arm_dma_free,
+       .mmap                   = arm_dma_mmap,
+       .get_sgtable            = arm_dma_get_sgtable,
+       .map_page               = arm_dma_map_page,
+       .unmap_page             = arm_dma_unmap_page,
+       .map_sg                 = arm_dma_map_sg,
+       .unmap_sg               = arm_dma_unmap_sg,
+       .sync_single_for_cpu    = arm_dma_sync_single_for_cpu,
+       .sync_single_for_device = arm_dma_sync_single_for_device,
+       .sync_sg_for_cpu        = arm_dma_sync_sg_for_cpu,
+       .sync_sg_for_device     = arm_dma_sync_sg_for_device,
+       .mapping_error          = arm_dma_mapping_error,
+       .dma_supported          = arm_dma_supported,
+};
+EXPORT_SYMBOL(arm_dma_m_ops);
+
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
        dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
@@ -356,7 +378,7 @@ static void __dma_free_buffer(struct page *page, size_t size)
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
                                     pgprot_t prot, struct page **ret_page,
                                     const void *caller, bool want_vaddr,
-                                    int coherent_flag, gfp_t gfp);
+                                    int coherent_flag, gfp_t gfp, bool zero);
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
                                 pgprot_t prot, struct page **ret_page,
@@ -413,7 +435,7 @@ static int __init atomic_pool_init(void)
        if (dev_get_cma_area(NULL))
                ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
                                      &page, atomic_pool_init, true, NORMAL,
-                                     GFP_KERNEL);
+                                     GFP_KERNEL, true);
        else
                ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
                                           &page, atomic_pool_init, true);
@@ -587,7 +609,7 @@ static int __free_from_pool(void *start, size_t size)
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
                                     pgprot_t prot, struct page **ret_page,
                                     const void *caller, bool want_vaddr,
-                                    int coherent_flag, gfp_t gfp)
+                                    int coherent_flag, gfp_t gfp, bool zero)
 {
        unsigned long order = get_order(size);
        size_t count = size >> PAGE_SHIFT;
@@ -598,7 +620,8 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
        if (!page)
                return NULL;
 
-       __dma_clear_buffer(page, size, coherent_flag);
+       if (zero)
+               __dma_clear_buffer(page, size, coherent_flag);
 
        if (!want_vaddr)
                goto out;
@@ -675,7 +698,7 @@ static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
        return __alloc_from_contiguous(args->dev, args->size, args->prot,
                                       ret_page, args->caller,
                                       args->want_vaddr, args->coherent_flag,
-                                      args->gfp);
+                                      args->gfp, args->zero);
 }
 
 static void cma_allocator_free(struct arm_dma_free_args *args)
@@ -728,7 +751,7 @@ static struct arm_dma_allocator remap_allocator = {
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                         gfp_t gfp, pgprot_t prot, bool is_coherent,
-                        unsigned long attrs, const void *caller)
+                        unsigned long attrs, const void *caller, bool zero)
 {
        u64 mask = get_coherent_dma_mask(dev);
        struct page *page = NULL;
@@ -743,6 +766,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                .caller = caller,
                .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
                .coherent_flag = is_coherent ? COHERENT : NORMAL,
+               .zero = zero,
        };
 
 #ifdef CONFIG_DMA_API_DEBUG
@@ -816,14 +840,27 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 
        return __dma_alloc(dev, size, handle, gfp, prot, false,
-                          attrs, __builtin_return_address(0));
+                          attrs, __builtin_return_address(0), true);
+}
+
+/*
+ * Same as arm_dma_alloc except don't zero memory on alloc
+ */
+void *arm_dma_malloc(struct device *dev, size_t size, dma_addr_t *handle,
+                    gfp_t gfp, unsigned long attrs)
+{
+       pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
+
+       return __dma_alloc(dev, size, handle, gfp, prot, false,
+                          attrs, __builtin_return_address(0),
+                          false);
 }
 
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
        dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
 {
        return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
-                          attrs, __builtin_return_address(0));
+                          attrs, __builtin_return_address(0), true);
 }
 
 static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
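
For context, a minimal sketch of how an allocation might flow through the
non-zeroing path once 'arm_dma_m_ops' is installed on the rproc device
(the helper below is an assumption for illustration, not part of this patch):

#include <linux/dma-mapping.h>

/*
 * Hypothetical late-attach reservation: with arm_dma_m_ops installed on
 * the device, dma_alloc_coherent() dispatches to arm_dma_malloc(), so the
 * CMA pages are reserved for the rproc without being cleared and the
 * firmware image pre-loaded before boot stays intact.
 */
static void *reserve_preloaded_carveout(struct device *dev, size_t len,
					dma_addr_t *dma)
{
	/* non-zeroing allocation via arm_dma_m_ops->alloc */
	return dma_alloc_coherent(dev, len, dma, GFP_KERNEL);
}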