author      Vladimir Murzin       2017-07-20 05:19:58 -0500
committer   Christoph Hellwig     2017-07-20 09:09:10 -0500
commit      43fc509c3efb5c973991ee24c449ab2a0d71dd1e (patch)
tree        759d4b3a97ecb73339c560b2814699fbf8f4f93a
parent      5771a8c08880cdca3bfb4a3fc6d309d6bba20877 (diff)
download    linux-phy-43fc509c3efb5c973991ee24c449ab2a0d71dd1e.tar.gz
            linux-phy-43fc509c3efb5c973991ee24c449ab2a0d71dd1e.tar.xz
            linux-phy-43fc509c3efb5c973991ee24c449ab2a0d71dd1e.zip
dma-coherent: introduce interface for default DMA pool
Christoph noticed [1] that the default DMA pool in its current form overloads
the DMA coherent infrastructure. In reply, Robin suggested [2] splitting the
per-device and global pool interfaces, so that allocation/release from the
default DMA pool is driven by the dma ops implementation.

This patch implements Robin's idea and provides an interface to
allocate/release/mmap the default (aka global) DMA pool. To make it clear
that the existing *_from_coherent routines work on the per-device pool,
they are renamed to *_from_dev_coherent.

[1] https://lkml.org/lkml/2017/7/7/370
[2] https://lkml.org/lkml/2017/7/7/431

Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Suggested-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Andras Szemzo <sza@esh.hu>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
-rw-r--r--   arch/arc/mm/dma.c              2
-rw-r--r--   arch/arm/mm/dma-mapping.c      2
-rw-r--r--   arch/arm64/mm/dma-mapping.c    4
-rw-r--r--   arch/mips/mm/dma-default.c     2
-rw-r--r--   drivers/base/dma-coherent.c    164
-rw-r--r--   drivers/base/dma-mapping.c     2
-rw-r--r--   include/linux/dma-mapping.h    40
7 files changed, 144 insertions, 72 deletions
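To illustrate what the split buys us, here is a minimal sketch (not part of this patch) of how an architecture's dma_map_ops callbacks could drive the new global pool: the per-device pool is still handled by the common dma_alloc_attrs()/dma_free_attrs() wrappers via dma_*_from_dev_coherent(), while the ops implementation itself decides when to fall back to the default pool. The dma_*_from_global_coherent() signatures are taken from the include/linux/dma-mapping.h hunk below; everything prefixed my_arch_ is a hypothetical placeholder.

/*
 * Illustrative sketch only -- assumes the interfaces added by this patch.
 * my_arch_alloc_pages()/my_arch_free_pages() stand in for whatever the
 * architecture normally does; they are not real kernel APIs.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *my_arch_alloc_pages(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs);
static void my_arch_free_pages(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle, unsigned long attrs);

static void *my_arch_dma_alloc(struct device *dev, size_t size,
			       dma_addr_t *dma_handle, gfp_t gfp,
			       unsigned long attrs)
{
	void *vaddr;

	/* Try the default (global) coherent pool first, e.g. on NOMMU. */
	vaddr = dma_alloc_from_global_coherent(size, dma_handle);
	if (vaddr)
		return vaddr;

	/* Otherwise fall back to the architecture's usual path. */
	return my_arch_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

static void my_arch_dma_free(struct device *dev, size_t size, void *vaddr,
			     dma_addr_t dma_handle, unsigned long attrs)
{
	/* Return the memory to the global pool if it was allocated there. */
	if (dma_release_from_global_coherent(get_order(size), vaddr))
		return;

	my_arch_free_pages(dev, size, vaddr, dma_handle, attrs);
}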
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 2a07e6ecafbd..71d3efff99d3 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -117,7 +117,7 @@ static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	if (off < count && user_count <= (count - off)) {
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e7380bafbfa6..fcf1473d6fed 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -851,7 +851,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long pfn = dma_to_pfn(dev, dma_addr);
 	unsigned long off = vma->vm_pgoff;
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index e90cd1db42a8..f27d4dd04384 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -329,7 +329,7 @@ static int __swiotlb_mmap(struct device *dev,
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
 					     is_device_dma_coherent(dev));
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	return __swiotlb_mmap_pfn(vma, pfn, size);
@@ -706,7 +706,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
 					     is_device_dma_coherent(dev));
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index e08598c70b3e..8e78251eccc2 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -232,7 +232,7 @@ static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	else
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	if (off < count && user_count <= (count - off)) {
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 2ae24c28e70c..1c152aed6b82 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de
 {
 	if (dev && dev->dma_mem)
 		return dev->dma_mem;
-	return dma_coherent_default_memory;
+	return NULL;
 }
 
 static inline dma_addr_t dma_get_device_base(struct device *dev,
@@ -165,34 +165,15 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 }
 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
 
-/**
- * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
- *
- * @dev: device from which we allocate memory
- * @size: size of requested memory area
- * @dma_handle: This will be filled with the correct dma handle
- * @ret: This pointer will be filled with the virtual address
- *	 to allocated area.
- *
- * This function should be only called from per-arch dma_alloc_coherent()
- * to support allocation from per-device coherent memory pools.
- *
- * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
- */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
-				       dma_addr_t *dma_handle, void **ret)
+static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
+		ssize_t size, dma_addr_t *dma_handle)
 {
-	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
 	int order = get_order(size);
 	unsigned long flags;
 	int pageno;
 	int dma_memory_map;
+	void *ret;
 
-	if (!mem)
-		return 0;
-
-	*ret = NULL;
 	spin_lock_irqsave(&mem->spinlock, flags);
 
 	if (unlikely(size > (mem->size << PAGE_SHIFT)))
@@ -203,21 +184,50 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 		goto err;
 
 	/*
-	 * Memory was found in the per-device area.
+	 * Memory was found in the coherent area.
 	 */
-	*dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
-	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
+	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+	ret = mem->virt_base + (pageno << PAGE_SHIFT);
 	dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
 	spin_unlock_irqrestore(&mem->spinlock, flags);
 	if (dma_memory_map)
-		memset(*ret, 0, size);
+		memset(ret, 0, size);
 	else
-		memset_io(*ret, 0, size);
+		memset_io(ret, 0, size);
 
-	return 1;
+	return ret;
 
 err:
 	spin_unlock_irqrestore(&mem->spinlock, flags);
+	return NULL;
+}
+
+/**
+ * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
+ * @dev: device from which we allocate memory
+ * @size: size of requested memory area
+ * @dma_handle: This will be filled with the correct dma handle
+ * @ret: This pointer will be filled with the virtual address
+ *	 to allocated area.
+ *
+ * This function should be only called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+		dma_addr_t *dma_handle, void **ret)
+{
+	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+	if (!mem)
+		return 0;
+
+	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+	if (*ret)
+		return 1;
+
 	/*
 	 * In the case where the allocation can not be satisfied from the
 	 * per-device area, try to fall back to generic memory if the
@@ -225,25 +235,20 @@ err:
 	 */
 	return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
-EXPORT_SYMBOL(dma_alloc_from_coherent);
+EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
 
-/**
- * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
- * @dev: device from which the memory was allocated
- * @order: the order of pages allocated
- * @vaddr: virtual address of allocated pages
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, releases that memory.
- *
- * Returns 1 if we correctly released the memory, or 0 if
- * dma_release_coherent() should proceed with releasing memory from
- * generic pools.
- */
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
 {
-	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+	if (!dma_coherent_default_memory)
+		return NULL;
+
+	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
+			dma_handle);
+}
 
+static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
+		int order, void *vaddr)
+{
 	if (mem && vaddr >= mem->virt_base && vaddr <
 	    (mem->virt_base + (mem->size << PAGE_SHIFT))) {
 		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
@@ -256,28 +261,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dma_release_from_coherent);
 
 /**
- * dma_mmap_from_coherent() - try to mmap the memory allocated from
- * per-device coherent memory pool to userspace
+ * dma_release_from_dev_coherent() - free memory to device coherent memory pool
  * @dev: device from which the memory was allocated
- * @vma: vm_area for the userspace memory
- * @vaddr: cpu address returned by dma_alloc_from_coherent
- * @size: size of the memory buffer allocated by dma_alloc_from_coherent
- * @ret: result from remap_pfn_range()
+ * @order: the order of pages allocated
+ * @vaddr: virtual address of allocated pages
  *
  * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, maps that memory to the provided vma.
+ * coherent memory pool and if so, releases that memory.
  *
- * Returns 1 if we correctly mapped the memory, or 0 if the caller should
- * proceed with mapping memory from generic pools.
+ * Returns 1 if we correctly released the memory, or 0 if the caller should
+ * proceed with releasing memory from generic pools.
  */
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
-			   void *vaddr, size_t size, int *ret)
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
 {
 	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
 
+	return __dma_release_from_coherent(mem, order, vaddr);
+}
+EXPORT_SYMBOL(dma_release_from_dev_coherent);
+
+int dma_release_from_global_coherent(int order, void *vaddr)
+{
+	if (!dma_coherent_default_memory)
+		return 0;
+
+	return __dma_release_from_coherent(dma_coherent_default_memory, order,
+			vaddr);
+}
+
+static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
+{
 	if (mem && vaddr >= mem->virt_base && vaddr + size <=
 	    (mem->virt_base + (mem->size << PAGE_SHIFT))) {
 		unsigned long off = vma->vm_pgoff;
@@ -296,7 +312,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dma_mmap_from_coherent);
+
+/**
+ * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
+ * @dev: device from which the memory was allocated
+ * @vma: vm_area for the userspace memory
+ * @vaddr: cpu address returned by dma_alloc_from_dev_coherent
+ * @size: size of the memory buffer allocated
+ * @ret: result from remap_pfn_range()
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
+ */
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+			   void *vaddr, size_t size, int *ret)
+{
+	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
+}
+EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
+
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
+				   size_t size, int *ret)
+{
+	if (!dma_coherent_default_memory)
+		return 0;
+
+	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
+					vaddr, size, ret);
+}
 
 /*
  * Support for reserved memory regions defined in device tree
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 5096755d185e..b555ff9dd8fc 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -235,7 +235,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	if (off < count && user_count <= (count - off)) {
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 843ab866e0f4..03c0196a6f24 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -157,16 +157,40 @@ static inline int is_device_dma_capable(struct device *dev)
  * These three functions are only for dma allocator.
  * Don't use them in device drivers.
  */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
 				       dma_addr_t *dma_handle, void **ret);
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
 
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
 			    void *cpu_addr, size_t size, int *ret);
+
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
+int dma_release_from_global_coherent(int order, void *vaddr);
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
+				  size_t size, int *ret);
+
 #else
-#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
-#define dma_release_from_coherent(dev, order, vaddr) (0)
-#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
+#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
+#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
+
+static inline void *dma_alloc_from_global_coherent(ssize_t size,
+						    dma_addr_t *dma_handle)
+{
+	return NULL;
+}
+
+static inline int dma_release_from_global_coherent(int order, void *vaddr)
+{
+	return 0;
+}
+
+static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
+						void *cpu_addr, size_t size,
+						int *ret)
+{
+	return 0;
+}
 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
 
 #ifdef CONFIG_HAS_DMA
@@ -481,7 +505,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
 
 	BUG_ON(!ops);
 
-	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
+	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
 		return cpu_addr;
 
 	if (!arch_dma_alloc_attrs(&dev, &flag))
@@ -503,7 +527,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
 	BUG_ON(!ops);
 	WARN_ON(irqs_disabled());
 
-	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
 		return;
 
 	if (!ops->free || !cpu_addr)