parent 12c0443
author    Robert Tivy <rtivy@ti.com>  Fri, 23 Jan 2015 01:11:02 +0000 (17:11 -0800)
committer Robert Tivy <rtivy@ti.com>  Fri, 23 Jan 2015 01:11:02 +0000 (17:11 -0800)
src/cmem/module/cmemk.c
index c58bbfb1b2f8b61e8c5d8ff021fe23ec68d1255a..23b934061aeaafec4de504111cae2444aefe264b 100644
--- a/src/cmem/module/cmemk.c
+++ b/src/cmem/module/cmemk.c
#ifdef __DEBUG
-//#define __D(fmt, args...) printk(KERN_DEBUG "CMEMK Debug: " fmt, ## args)
-#define __D(fmt, args...) printk("CMEMK Debug: " fmt, ## args)
+#define __D(fmt, args...) printk(KERN_DEBUG "CMEMK Debug: " fmt, ## args)
#else
#define __D(fmt, args...)
#endif
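
The only functional change in this hunk is that __D() now tags its output with KERN_DEBUG instead of printing at the default console level. A minimal sketch (assuming a plain kernel module with no dynamic debug) of what that means for a caller:

    #include <linux/kernel.h>   /* printk(), KERN_DEBUG */

    #ifdef __DEBUG
    #define __D(fmt, args...) printk(KERN_DEBUG "CMEMK Debug: " fmt, ## args)
    #else
    #define __D(fmt, args...)
    #endif

    static void sketch(void)
    {
        /* The message always lands in the kernel log buffer (visible with
         * `dmesg`), but only reaches the console once the console loglevel
         * is raised above 7, e.g. with `dmesg -n 8`. */
        __D("allocating block\n");
    }
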
size_t curSize, adjSize;
size_t remainSize; /* free memory after allocated memory */
size_t adjAlign, offset;
-// long key;
-
-#if 0
- /* Assert that requested align is a power of 2 */
- Assert_isTrue(((reqAlign & (reqAlign - 1)) == 0), HeapMem_A_align);
-
- /* Assert that requested block size is non-zero */
- Assert_isTrue((reqSize != 0), HeapMem_A_zeroBlock);
-#endif
adjSize = reqSize;
adjAlign = HEAP_ALIGN;
}
-/*
- * We don't need to enter the "gate" since this function is called
- * with it held already.
- */
-// key = Gate_enterModule();
-
/*
* The block will be allocated from curHeader. Maintain a pointer to
* prevHeader so prevHeader->next can be updated after the alloc.
offset = adjAlign - offset;
}
-#if 0
- /* Internal Assert that offset is a multiple of HEAP_ALIGN */
- Assert_isTrue(((offset & (HEAP_ALIGN - 1)) == 0), NULL);
-#endif
-
/* big enough? */
if (curSize >= (adjSize + offset)) {
/* Set the pointer that will be returned. Alloc from front */
*/
remainSize = curSize - adjSize - offset;
-#if 0
- /* Internal Assert that remainSize is a multiple of HEAP_ALIGN */
- Assert_isTrue(((remainSize & (HEAP_ALIGN - 1)) == 0), NULL);
-#endif
-
if (remainSize) {
newHeaderPhys = allocAddr + adjSize;
map_header((void **)&newHeader, newHeaderPhys,
}
}
-/*
- * See above comment on Gate_enterModule for an explanation of why we
- * don't use the "gate".
- */
-// Gate_leaveModule(key);
-
unmap_header(curHeader, curHeader_vm_area);
/* Success, return the allocated memory */
}
}
-/*
- * See above comment on Gate_enterModule for an explanation of why we
- * don't use the "gate".
- */
-// Gate_leaveModule(key);
-
return 0;
}
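
For context, the allocation path these hunks tidy up is a first-fit walk: each candidate free block is padded out to the requested alignment, checked against the adjusted size, and split if anything is left over. A rough sketch of that arithmetic with hypothetical names; the prevHeader/newHeader free-list relinking done by the real allocator is omitted:

    #include <stddef.h>

    /* Returns the physical address to hand out, or 0 if this free block
     * cannot satisfy the request. */
    static unsigned long try_alloc_from_block(unsigned long blockPhys,
                                              size_t curSize,   /* size of this free block */
                                              size_t adjSize,   /* rounded-up request */
                                              size_t adjAlign,  /* power-of-two alignment */
                                              size_t *remainSize)
    {
        /* Gap needed to reach the next adjAlign boundary ("alloc from front"). */
        size_t offset = blockPhys & (adjAlign - 1);
        if (offset) {
            offset = adjAlign - offset;
        }

        /* "big enough?" - the block must hold the alignment gap plus the request. */
        if (curSize < adjSize + offset) {
            return 0;
        }

        /* Whatever is left past the allocation becomes a new free header
         * at allocAddr + adjSize, as in the hunk above. */
        *remainSize = curSize - adjSize - offset;
        return blockPhys + offset;
    }
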
*/
void HeapMem_free(int bi, phys_addr_t block, size_t size)
{
-// long key;
struct vm_struct *curHeader_vm_area;
struct vm_struct *newHeader_vm_area;
struct vm_struct *nextHeader_vm_area;
size += HEAP_ALIGN - offset;
}
-/*
- * We don't need to enter the "gate" since this function is called
- * with it held already.
- */
-// key = Gate_enterModule();
-
newHeaderPhys = block;
nextHeaderPhys = heap_head[bi].next;
}
unmap_header(newHeader, newHeader_vm_area);
-
-/*
- * See above comment on Gate_enterModule for an explanation of why we
- * don't use the "gate".
- */
-// Gate_leaveModule(key);
}
/* Traverses the page tables and translates a virtual address to a physical. */
* HeapMem_free()
*/
virtp_end = virtp + size;
-#if 1
outer_inv_range(physp, physp + size);
dmac_map_area(virtp, size, DMA_FROM_DEVICE);
-#else
- dma_sync_single_for_device(NULL, (dma_addr_t)physp, size, DMA_FROM_DEVICE);
-#endif
+
__D("FREEHEAP: invalidated user virtual "
"0x%p -> 0x%p\n", virtp, virtp_end);
}
switch (cmd & ~CMEM_IOCMAGIC) {
case CMEM_IOCCACHEWB:
-#if 1
dmac_map_area(virtp, block.size, DMA_TO_DEVICE);
outer_clean_range(physp, physp + block.size);
-#else
- dma_sync_single_for_device(NULL, (dma_addr_t)physp, block.size, DMA_TO_DEVICE);
-#endif
+
__D("CACHEWB: cleaned user virtual 0x%p -> 0x%p\n",
virtp, virtp_end);
break;
case CMEM_IOCCACHEINV:
-#if 1
outer_inv_range(physp, physp + block.size);
dmac_map_area(virtp, block.size, DMA_FROM_DEVICE);
-#else
- dma_sync_single_for_device(NULL, (dma_addr_t)physp, block.size, DMA_FROM_DEVICE);
-#endif
+
__D("CACHEINV: invalidated user virtual 0x%p -> 0x%p\n",
virtp, virtp_end);
break;
case CMEM_IOCCACHEWBINV:
-#if 1
dmac_map_area(virtp, block.size, DMA_BIDIRECTIONAL);
outer_flush_range(physp, physp + block.size);
-#else
- dma_sync_single_for_device(NULL, (dma_addr_t)physp, block.size, DMA_TO_DEVICE);
- dma_sync_single_for_device(NULL, (dma_addr_t)physp, block.size, DMA_FROM_DEVICE);
-#endif
+
__D("CACHEWBINV: flushed user virtual 0x%p -> 0x%p\n",
virtp, virtp_end);
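
The three cache ioctl cases keep the explicit ARM inner/outer maintenance and drop the commented-out dma_sync_single_for_device() alternative. The pattern they follow is sketched below with a hypothetical wrapper; note that dmac_map_area() and the outer_*() helpers are ARM-private interfaces rather than a portable driver API:

    #include <linux/dma-mapping.h>
    #include <asm/cacheflush.h>
    #include <asm/outercache.h>

    enum cmem_cache_action { CACHE_WB, CACHE_INV, CACHE_WBINV };

    static void cache_maintain(void *virt, phys_addr_t phys, size_t size,
                               enum cmem_cache_action action)
    {
        switch (action) {
        case CACHE_WB:
            /* Writeback: clean the inner (L1) cache first, then the outer
             * (L2) cache, so dirty data propagates out to memory. */
            dmac_map_area(virt, size, DMA_TO_DEVICE);
            outer_clean_range(phys, phys + size);
            break;
        case CACHE_INV:
            /* Invalidate: outer before inner, so stale L2 lines cannot be
             * refetched into L1 after it has been invalidated. */
            outer_inv_range(phys, phys + size);
            dmac_map_area(virt, size, DMA_FROM_DEVICE);
            break;
        case CACHE_WBINV:
            /* Writeback + invalidate in one pass. */
            dmac_map_area(virt, size, DMA_BIDIRECTIONAL);
            outer_flush_range(phys, phys + size);
            break;
        }
    }
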
list_del(u);
kfree(user);
-
-#if 0
- /*
- * If a process is limited to appearing on an entry's
- * registered user list only one time, then the
- * test below could be done as an optimization, since
- * if we're here and it's not last close, we just
- * removed the "current" process from the list (see
- * IOCREGUSER ioctl() command comment).
- */
- if (!last_close) {
- break;
- }
-#endif
}
u = unext;
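
The last hunk trims a speculative optimization from the registered-user cleanup; the surviving list_del()/kfree() walk relies on the saved unext pointer so that deleting the current node during traversal stays safe, which is the same pattern list_for_each_entry_safe() packages up. A sketch with hypothetical types:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct registered_user {            /* hypothetical stand-in */
        struct list_head element;
        void *owner;
    };

    /* Delete every registered user: the `tmp` cursor plays the role of
     * `unext` above, holding the next node before the current one is freed. */
    static void free_all_users(struct list_head *users)
    {
        struct registered_user *user, *tmp;

        list_for_each_entry_safe(user, tmp, users, element) {
            list_del(&user->element);
            kfree(user);
        }
    }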