summary | shortlog | log | commit | commitdiff | tree
raw | patch | inline | side by side (parent: 63af67b)
author    Angela Stegmaier <angelabaker@ti.com>
          Thu, 12 Sep 2019 17:45:11 +0000 (17:45 +0000)
committer Suman Anna <s-anna@ti.com>
          Mon, 23 Sep 2019 01:46:54 +0000 (20:46 -0500)
Add a new function, dma_malloc_from_coherent(), which
takes a parameter that specifies if the allocated memory
should be zeroed or not. Modify dma_alloc_from_coherent
to call dma_malloc_from_coherent with the flag set to
true in order to zero the memory. In this way
dma_alloc_from_coherent behaves the same as before.
Signed-off-by: Angela Stegmaier <angelabaker@ti.com>
Signed-off-by: Keerthy <j-keerthy@ti.com>
Signed-off-by: Suman Anna <s-anna@ti.com>
takes a parameter that specifies if the allocated memory
should be zeroed or not. Modify dma_alloc_from_coherent
to call dma_malloc_from_coherent with the flag set to
true in order to zero the memory. In this way
dma_alloc_from_coherent behaves the same as before.
Signed-off-by: Angela Stegmaier <angelabaker@ti.com>
Signed-off-by: Keerthy <j-keerthy@ti.com>
Signed-off-by: Suman Anna <s-anna@ti.com>
include/linux/dma-mapping.h | patch | blob | history | |
kernel/dma/coherent.c | patch | blob | history |
index 1db6a6b46d0d3dbdb10dbc74cb9e481345f0a9ef..dcc707dafc7a86ba7070dbdf913a5b5069c2eaca 100644 (file)
* Don't use them in device drivers.
*/
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle, void **ret);
+ dma_addr_t *dma_handle, void **ret,
+ bool zero);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
size_t size, int *ret);
#else
-#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
+#define dma_alloc_from_dev_coherent(dev, size, handle, ret, zero) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
#define arch_dma_alloc_attrs(dev) (true)
#endif
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- unsigned long attrs)
+static inline void *dma_malloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ unsigned long attrs, bool zero)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr;
BUG_ON(!ops);
WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
- if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
+ if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr, zero))
return cpu_addr;
/* let the implementation decide on the zone to allocate from: */
return cpu_addr;
}
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ unsigned long attrs)
+{
+ return dma_malloc_attrs(dev, size, dma_handle, flag, attrs, true);
+}
+
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
unsigned long attrs)
return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}
+static inline void *dma_malloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ return dma_malloc_attrs(dev, size, dma_handle, flag, 0, false);
+}
+
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 597d40893862696ed76457c7071c8d5fd074f612..39128ff5821e950e2ed101b456ff247c49ce6f5d 100644 (file)
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
- ssize_t size, dma_addr_t *dma_handle)
+ ssize_t size, dma_addr_t *dma_handle, bool zero)
{
int order = get_order(size);
unsigned long flags;
*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
ret = mem->virt_base + (pageno << PAGE_SHIFT);
spin_unlock_irqrestore(&mem->spinlock, flags);
- memset(ret, 0, size);
+ if (zero)
+ memset(ret, 0, size);
return ret;
err:
spin_unlock_irqrestore(&mem->spinlock, flags);
* generic memory areas, or !0 if dma_alloc_coherent should return @ret.
*/
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle, void **ret)
+ dma_addr_t *dma_handle, void **ret, bool zero)
{
struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
if (!mem)
return 0;
- *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+ *ret = __dma_alloc_from_coherent(mem, size, dma_handle, zero);
if (*ret)
return 1;
return NULL;
return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
- dma_handle);
+ dma_handle, true);
}
static int __dma_release_from_coherent(struct dma_coherent_mem *mem,