summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGowtham Tammana2016-09-23 15:33:49 -0500
committerGowtham Tammana2016-12-22 16:38:33 -0600
commit4b4e25f792a343d7233afbec816c7ae56deb645c (patch)
tree4730341833db72c3389d20e3833e86dbc3b71f4f
parent65887c911db495fb66c6847c8b6eadc76c7470f4 (diff)
downloadti-gc320-driver-4b4e25f792a343d7233afbec816c7ae56deb645c.tar.gz
ti-gc320-driver-4b4e25f792a343d7233afbec816c7ae56deb645c.tar.xz
ti-gc320-driver-4b4e25f792a343d7233afbec816c7ae56deb645c.zip
Remove dependency on dmac_flush/inv/clean_range
The driver cache ops routine uses the `dmac_{flush,inv,clean}_range` functions, which are no longer exported since k4.3. In their place, cache operations are performed on individual pages by doing a page-table walk for the supplied addresses. The functions are placed in the platform-specific files in the driver. Signed-off-by: Gowtham Tammana <g-tammana@ti.com>
-rwxr-xr-xsrc/hal/os/linux/kernel/gc_hal_kernel_os.c25
-rwxr-xr-xsrc/hal/os/linux/kernel/platform/ti/gc_hal_kernel_platform_j6.c254
2 files changed, 269 insertions, 10 deletions
diff --git a/src/hal/os/linux/kernel/gc_hal_kernel_os.c b/src/hal/os/linux/kernel/gc_hal_kernel_os.c
index 354a272..546a188 100755
--- a/src/hal/os/linux/kernel/gc_hal_kernel_os.c
+++ b/src/hal/os/linux/kernel/gc_hal_kernel_os.c
@@ -5511,13 +5511,11 @@ gckOS_CacheClean(
5511 return gcvSTATUS_OK; 5511 return gcvSTATUS_OK;
5512 } 5512 }
5513 5513
5514 /* @tga dmac_map_area is not exported starting from k4.3 5514 /* the below is not valid for >k4.3, handling this in platform
5515 * using arm_dma_ops ops. Ideally place this in platform 5515 * cache op
5516 * specific function above.
5517 */ 5516 */
5518 arm_dma_ops.sync_single_for_device(gcvNULL,
5519 (dma_addr_t)Physical, Bytes, DMA_TO_DEVICE);
5520#if 0 5517#if 0
5518
5521#if !gcdCACHE_FUNCTION_UNIMPLEMENTED 5519#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
5522#ifdef CONFIG_ARM 5520#ifdef CONFIG_ARM
5523 5521
@@ -5553,6 +5551,7 @@ gckOS_CacheClean(
5553 DMA_TO_DEVICE); 5551 DMA_TO_DEVICE);
5554#endif 5552#endif
5555#endif 5553#endif
5554
5556#endif 5555#endif
5557 5556
5558 /* Success. */ 5557 /* Success. */
@@ -5623,13 +5622,11 @@ gckOS_CacheInvalidate(
5623 return gcvSTATUS_OK; 5622 return gcvSTATUS_OK;
5624 } 5623 }
5625 5624
5626 /* @tga `dmac_map_area` function/macro is no longer exported starting 5625 /* the below is not valid for >k4.3, handling this in platform
5627 * from k4.3. Using dma mappings API ops instead, ideally these need 5626 * cache op
5628 * to go inside platform specific ops above. TODO
5629 */ 5627 */
5630 arm_dma_ops.sync_single_for_cpu(gcvNULL,
5631 (dma_addr_t)Physical, Bytes, DMA_FROM_DEVICE);
5632#if 0 5628#if 0
5629
5633#if !gcdCACHE_FUNCTION_UNIMPLEMENTED 5630#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
5634#ifdef CONFIG_ARM 5631#ifdef CONFIG_ARM
5635 5632
@@ -5661,6 +5658,7 @@ gckOS_CacheInvalidate(
5661 DMA_FROM_DEVICE); 5658 DMA_FROM_DEVICE);
5662#endif 5659#endif
5663#endif 5660#endif
5661
5664#endif 5662#endif
5665 5663
5666 /* Success. */ 5664 /* Success. */
@@ -5731,6 +5729,11 @@ gckOS_CacheFlush(
5731 return gcvSTATUS_OK; 5729 return gcvSTATUS_OK;
5732 } 5730 }
5733 5731
5732 /* the below is not valid for >k4.3, handling this in platform
5733 * cache op
5734 */
5735#if 0
5736
5734#if !gcdCACHE_FUNCTION_UNIMPLEMENTED 5737#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
5735#ifdef CONFIG_ARM 5738#ifdef CONFIG_ARM
5736 /* Inner cache. */ 5739 /* Inner cache. */
@@ -5758,6 +5761,8 @@ gckOS_CacheFlush(
5758#endif 5761#endif
5759#endif 5762#endif
5760 5763
5764#endif
5765
5761 /* Success. */ 5766 /* Success. */
5762 gcmkFOOTER_NO(); 5767 gcmkFOOTER_NO();
5763 return gcvSTATUS_OK; 5768 return gcvSTATUS_OK;
diff --git a/src/hal/os/linux/kernel/platform/ti/gc_hal_kernel_platform_j6.c b/src/hal/os/linux/kernel/platform/ti/gc_hal_kernel_platform_j6.c
index e760bb6..87727fe 100755
--- a/src/hal/os/linux/kernel/platform/ti/gc_hal_kernel_platform_j6.c
+++ b/src/hal/os/linux/kernel/platform/ti/gc_hal_kernel_platform_j6.c
@@ -61,6 +61,10 @@
61#include <linux/io.h> 61#include <linux/io.h>
62#include <linux/of.h> 62#include <linux/of.h>
63#include <linux/of_device.h> 63#include <linux/of_device.h>
64#include <asm/cacheflush.h>
65#include <linux/slab.h>
66#include <linux/pagemap.h>
67#include <linux/highmem.h>
64 68
65/* 69/*
66 * GC320 platform data struct, using the definition as in 70 * GC320 platform data struct, using the definition as in
@@ -235,6 +239,255 @@ _AdjustParam(
235 return gcvSTATUS_TRUE; 239 return gcvSTATUS_TRUE;
236} 240}
237 241
242static gceSTATUS
243cache_op_on_page(struct page *page, enum dma_data_direction dir)
244{
245 if (!PageHighMem(page) && page_to_phys(page)) {
246 dma_sync_single_for_device(gcvNULL,
247 page_to_phys(page),
248 PAGE_SIZE,
249 dir);
250 } else {
251 flush_dcache_page(page);
252 }
253
254 return gcvSTATUS_OK;
255}
256
257static gceSTATUS
258cache_op_on_mdl(PLINUX_MDL mdl, enum dma_data_direction dir)
259{
260 gceSTATUS status = gcvSTATUS_OK;
261 uint32_t i = 0;
262 uint32_t numPages = mdl->numPages;
263
264 for (i = 0; i < numPages; i++) {
265 struct page *page;
266
267 if (mdl->contiguous) {
268 page = nth_page(mdl->u.contiguousPages, i);
269 } else {
270 page = mdl->u.nonContiguousPages[i];
271 }
272
273 status = cache_op_on_page(page, dir);
274 }
275 return status;
276}
277
/*
 * Translate a user-space virtual address into its struct page by
 * walking the current process's page tables.
 *
 * Used as the fallback for VM_IO/VM_PFNMAP regions where
 * get_user_pages() cannot pin the pages.
 *
 * Returns gcvSTATUS_INVALID_ADDRESS if any level of the walk
 * (pgd/pud/pmd) is missing or bad, or if the PTE is absent, not
 * present, or not writable; gcvSTATUS_OK otherwise, with *page set.
 *
 * NOTE(review): assumes the 4-level pgd/pud/pmd/pte layout of the
 * kernels this driver targets; kernels >= 4.12 add a p4d level.
 * Caller must hold current->mm->mmap_sem (cache_op_on_logical does).
 */
static gceSTATUS
logical_to_page(gctUINTPTR_T logical, struct page **page)
{
    gceSTATUS status = gcvSTATUS_OK;
    pgd_t *pgd;
    pmd_t *pmd;
    pud_t *pud;
    pte_t *pte;
    spinlock_t *ptl;

    pgd = pgd_offset(current->mm, logical);
    if (pgd_none(*pgd) || pgd_bad(*pgd)) {
        gckOS_DebugTrace(gcvLEVEL_ERROR, "Invalid pgd entry\n");
        gcmkONERROR(gcvSTATUS_INVALID_ADDRESS);
    }

    pud = pud_offset(pgd, logical);
    if (pud_none(*pud) || pud_bad(*pud)) {
        gckOS_DebugTrace(gcvLEVEL_ERROR, "Invalid pud entry\n");
        gcmkONERROR(gcvSTATUS_INVALID_ADDRESS);
    }

    pmd = pmd_offset(pud, logical);
    if (pmd_none(*pmd) || pmd_bad(*pmd)) {
        gckOS_DebugTrace(gcvLEVEL_ERROR, "Invalid pmd entry\n");
        gcmkONERROR(gcvSTATUS_INVALID_ADDRESS);
    }

    /* Map the PTE and take the page-table lock so the entry cannot
     * change while we inspect it. */
    pte = pte_offset_map_lock(current->mm, pmd, logical, &ptl);
    if (pte_none(*pte) || !pte_present(*pte) || !pte_write(*pte)) {
        status = gcvSTATUS_INVALID_ADDRESS;
        goto UnLock;
    }
    *page = pte_page(*pte);

UnLock:
    /* Always drop the PTE mapping/lock before returning. */
    pte_unmap_unlock(pte, ptl);
OnError:
    return status;
}
318
319static gceSTATUS
320cache_op_on_logical(gctPOINTER logical, gctSIZE_T bytes,
321 enum dma_data_direction dir)
322{
323 gceSTATUS status = gcvSTATUS_OK;
324 gctUINTPTR_T startAddr, endAddr;
325 gctUINTPTR_T startAddrAligned, endAddrAligned;
326 gctSIZE_T pageCount;
327 struct page **pages;
328 int numPagesMapped;
329 int i;
330
331 startAddr = (gctUINTPTR_T)logical;
332 endAddr = startAddr + bytes;
333
334 startAddrAligned = startAddr & PAGE_MASK;
335 endAddrAligned = PAGE_ALIGN(endAddr);
336 pageCount = (endAddrAligned - startAddrAligned) >> PAGE_SHIFT;
337
338 pages = kmalloc(pageCount * sizeof(struct page *), GFP_KERNEL);
339 if (!pages) {
340 status = gcvSTATUS_OUT_OF_MEMORY;
341 goto OnError;
342 }
343 memset(pages, 0, sizeof(struct page *) * pageCount);
344
345 /* lock down user memory */
346 down_read(&current->mm->mmap_sem);
347
348 numPagesMapped = get_user_pages(current, current->mm,
349 startAddr, pageCount, 1, 0, pages, gcvNULL);
350
351 if (numPagesMapped == pageCount) {
352 for (i = 0; i < pageCount; i++) {
353 status = cache_op_on_page(pages[i], dir);
354 }
355 } else {
356 struct vm_area_struct *vma;
357
358 /* get_user_pages didnt work, release the pages if any and look for
359 * the pages in the appropriate memory region of the process. Region
360 * might correspond to memory mapped I/O.
361 */
362 gckOS_DebugTrace(gcvLEVEL_INFO,
363 "get_user_pages failed (%d/%d pages), trying to acquire from CPU page table\n",
364 numPagesMapped, pageCount);
365 if (numPagesMapped > 0) {
366 for (i = 0; i < numPagesMapped; i++) {
367 if (pages[i] == gcvNULL)
368 break;
369
370 page_cache_release(pages[i]);
371 pages[i] = gcvNULL;
372 }
373 numPagesMapped = 0;
374 }
375
376 vma = find_vma(current->mm, startAddr);
377 if (!vma) {
378 gckOS_DebugTrace(gcvLEVEL_ERROR,
379 "%s: find_vma returned error for start address 0x%lx\n",
380 startAddr);
381 status = gcvSTATUS_INVALID_ADDRESS;
382 goto OnExit;
383 }
384
385 /* find_vma locates a region with an end point past a given virtual
386 * address, check if the address is within in the region
387 */
388 if (startAddr < vma->vm_start) {
389 gckOS_DebugTrace(gcvLEVEL_ERROR,
390 "start address 0x%lx is outside the returned vma region\n",
391 startAddr);
392 goto OnExit;
393 }
394 if (endAddr > vma->vm_end) {
395 gckOS_DebugTrace(gcvLEVEL_ERROR,
396 "end address 0x%lx is outside the returned vma region\n",
397 endAddr);
398 goto OnExit;
399 }
400 if (!(vma->vm_flags & VM_IO) || !(vma->vm_flags & VM_PFNMAP)) {
401 gckOS_DebugTrace(gcvLEVEL_ERROR,
402 "Memory region does not represent IO || PFNMAP region (VMA flags 0x%lx)\n",
403 vma->vm_flags);
404 goto OnExit;
405 }
406 if ((vma->vm_flags & (VM_READ | VM_WRITE)) != (VM_READ | VM_WRITE)) {
407 gckOS_DebugTrace(gcvLEVEL_ERROR,
408 "No read/write access to memory region (VMA flags 0x%lx\n",
409 vma->vm_flags);
410 goto OnExit;
411 }
412
413 for (i = 0; i < pageCount; i++) {
414 gctUINTPTR_T logical = startAddr;
415
416 status = logical_to_page(logical, &pages[i]);
417 if (status)
418 goto OnExit;
419 status = cache_op_on_page(pages[i], dir);
420 if (status)
421 goto OnExit;
422
423 logical += PAGE_SIZE;
424 }
425 }
426
427OnExit:
428 up_read(&current->mm->mmap_sem);
429
430 kfree(pages);
431
432OnError:
433 return status;
434}
435
436/*
437 * cache ops: The driver is using the old `dmac_{flush,clean,inv}_range` API
438 * for doing cache ops. This API is no longer available since k4.3 and needed
439 * to be updated. Performing cache ops on individual pages here in the platform
440 * op. This could have resided in the driver as well as no platform specific
441 * changes are in here.
442 */
443static gceSTATUS
444platform_cache(
445 IN gckPLATFORM Platform,
446 IN gctUINT32 ProcessID,
447 IN gctPHYS_ADDR Handle,
448 IN gctUINT32 Physical,
449 IN gctPOINTER Logical,
450 IN gctSIZE_T Bytes,
451 IN gceCACHEOPERATION Operation
452 )
453{
454 gceSTATUS status = gcvSTATUS_OK;
455 enum dma_data_direction dir;
456
457 switch (Operation) {
458 case gcvCACHE_FLUSH:
459 dir = DMA_BIDIRECTIONAL;
460 break;
461 case gcvCACHE_CLEAN:
462 dir = DMA_TO_DEVICE;
463 break;
464 case gcvCACHE_INVALIDATE:
465 dir = DMA_FROM_DEVICE;
466 break;
467 default:
468 return gcvSTATUS_INVALID_ARGUMENT;
469 }
470
471 /* have access to phys addr, use dma api */
472 if (Physical != gcvINVALID_ADDRESS) {
473 dma_sync_single_for_device(gcvNULL,
474 (dma_addr_t)Physical,
475 Bytes,
476 dir);
477 } else if (Handle != gcvNULL) {
478 /* only have access to unified driver `PLINUX_MDL` struct,
479 * query page list from this struct and perform cache ops
480 * on each page.
481 */
482 PLINUX_MDL mdl = (PLINUX_MDL)Handle;
483 status = cache_op_on_mdl(mdl, dir);
484 } else {
485 status = cache_op_on_logical(Logical, Bytes, dir);
486 }
487
488 return status;
489}
490
238gcsPLATFORM_OPERATIONS platformOperations = 491gcsPLATFORM_OPERATIONS platformOperations =
239{ 492{
240 .adjustParam = _AdjustParam, 493 .adjustParam = _AdjustParam,
@@ -242,6 +495,7 @@ gcsPLATFORM_OPERATIONS platformOperations =
242 .needAddDevice = _NeedAddDevice, 495 .needAddDevice = _NeedAddDevice,
243 .getPower = _GetPower, 496 .getPower = _GetPower,
244 .putPower = _PutPower, 497 .putPower = _PutPower,
498 .cache = platform_cache,
245}; 499};
246 500
247void 501void