commitdiff (parent: d0be7e9)
author    Sam Nelson <sam.nelson@ti.com>  Fri, 26 Jan 2018 17:35:59 +0000 (12:35 -0500)
committer Sam Nelson <sam.nelson@ti.com>  Fri, 26 Jan 2018 17:35:59 +0000 (12:35 -0500)
Signed-off-by: Sam Nelson <sam.nelson@ti.com>
src/cmem/module/cmemk.c
index 681ffc0a6730b640fc952e42ca68694a4d0f3f07..5b6bbc791268d4988de254604a023a9fd6f4eeb8 100644 (file)
--- a/src/cmem/module/cmemk.c
+++ b/src/cmem/module/cmemk.c
size_t curSize, adjSize;
size_t remainSize; /* free memory after allocated memory */
size_t adjAlign, offset;
+ int ret_value;
adjSize = reqSize;
/* Loop over the free list. */
while (curHeaderPhys != 0) {
- map_header((void **)&curHeader, curHeaderPhys, &curHeader_vm_area);
+ ret_value = map_header((void **)&curHeader, curHeaderPhys, &curHeader_vm_area);
+ if (ret_value < 0) {
+ return 0;
+ }
curSize = curHeader->size;
/*
if (remainSize) {
newHeaderPhys = allocAddr + adjSize;
- map_header((void **)&newHeader, newHeaderPhys,
- &newHeader_vm_area);
+ ret_value = map_header((void **)&newHeader, newHeaderPhys,
+ &newHeader_vm_area);
+ if (ret_value < 0)
+ return 0;
newHeader->next = curHeader->next;
newHeader->size = remainSize;
* it is safe.
*/
if (prevHeaderPhys != 0) {
- map_header((void **)&prevHeader, prevHeaderPhys,
- &prevHeader_vm_area);
+ ret_value = map_header((void **)&prevHeader, prevHeaderPhys,
+ &prevHeader_vm_area);
+ if (ret_value < 0)
+ return 0;
}
else {
prevHeader = &heap_head[bi];
phys_addr_t newHeaderPhys;
phys_addr_t nextHeaderPhys;
size_t offset;
+ int ret_value;
/* Restore size to actual allocated size */
if ((offset = size & (HEAP_ALIGN - 1)) != 0) {
/* Go down freelist and find right place for buf */
while (nextHeaderPhys != 0 && nextHeaderPhys < newHeaderPhys) {
- map_header((void **)&nextHeader, nextHeaderPhys, &nextHeader_vm_area);
+ ret_value = map_header((void **)&nextHeader, nextHeaderPhys, &nextHeader_vm_area);
+ if (ret_value < 0)
+ return;
curHeaderPhys = nextHeaderPhys;
nextHeaderPhys = nextHeader->next;
unmap_header(nextHeader, nextHeader_vm_area);
}
- map_header((void **)&newHeader, newHeaderPhys, &newHeader_vm_area);
+ ret_value = map_header((void **)&newHeader, newHeaderPhys, &newHeader_vm_area);
+ if (ret_value < 0)
+ return;
if (curHeaderPhys != 0) {
- map_header((void **)&curHeader, curHeaderPhys, &curHeader_vm_area);
+ ret_value = map_header((void **)&curHeader, curHeaderPhys, &curHeader_vm_area);
+ if (ret_value < 0)
+ return;
}
else {
curHeader = &heap_head[bi];
/* Join contiguous free blocks */
/* Join with upper block */
if (nextHeaderPhys != 0 && (newHeaderPhys + size) == nextHeaderPhys) {
- map_header((void **)&nextHeader, nextHeaderPhys, &nextHeader_vm_area);
-
+ ret_value = map_header((void **)&nextHeader, nextHeaderPhys, &nextHeader_vm_area);
+ if (ret_value < 0)
+ return;
newHeader->next = nextHeader->next;
newHeader->size += nextHeader->size;
for (e = busylistp->next; e != busylistp; e = e->next) {
entry = list_entry(e, struct pool_buffer, element);
+ if ( entry != NULL )
+ __D("Busy: Buffer with id %d and physical address %#llx\n",
+ entry->id, (unsigned long long)entry->physp);
+ }
- __D("Busy: Buffer with id %d and physical address %#llx\n",
- entry->id, (unsigned long long)entry->physp);
- }
+ if (bi < NBLOCKS) {
- __D("Freelist for pool %d:\n", idx);
- for (e = freelistp->next; e != freelistp; e = e->next) {
+ __D("Freelist for pool %d:\n", idx);
+ for (e = freelistp->next; e != freelistp; e = e->next) {
entry = list_entry(e, struct pool_buffer, element);
-
- __D("Free: Buffer with id %d and physical address %#llx\n",
- entry->id, (unsigned long long)entry->physp);
+ if ( entry != NULL )
+ __D("Free: Buffer with id %d and physical address %#llx\n",
+ entry->id, (unsigned long long)entry->physp);
+ }
}
mutex_unlock(&cmem_mutex);
allocDesc.alloc_pool_outparams.size = size;
if (copy_to_user(argp, &allocDesc, sizeof(allocDesc))) {
- mutex_unlock(&cmem_mutex);
return -EFAULT;
}
}
/* Lookup physp in the busy entry list */
entry = find_busy_entry(physp, &pool, &e, &bi, NULL);
+ if (entry == NULL) {
+ __E(" Failed to find entry virtp: %p physp: %#llx \n",
+ (void *)dmabuf_desc.virtp, (unsigned long long)physp);
+ mutex_unlock(&cmem_mutex);
+ return -EFAULT;
+ }
+
/* Export to dmabuf */
dmabuf = cmem_dmabuf_export(entry, O_RDWR);
if (IS_ERR(dmabuf)) {
ret = dma_buf_fd(dmabuf, O_CLOEXEC);
if (ret < 0) {
dma_buf_put(dmabuf);
+ mutex_unlock(&cmem_mutex);
return -EFAULT;
}
heap_head[bi].next = heap_physp[bi];
heap_head[bi].size = heap_size[bi];
- map_header((void **)&virtp, heap_physp[bi], &ioremap_area);
+ err = map_header((void **)&virtp, heap_physp[bi], &ioremap_area);
+ if (err < 0) {
+ __E("Failed to alloc pool of size 0x%llu and number of buffers %d\n", pool_size[bi][i], pool_num_buffers[bi][i]);
+ err = -ENOMEM;
+
+ goto fail_after_create;
+ }
header = (HeapMem_Header *)virtp;
header->next = 0;
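
The change is the same throughout the patch: map_header() now reports failure through a negative return value, and every caller checks it before touching the header, either returning 0 from the allocation path, returning immediately from the (void) free path, or jumping to fail_after_create during pool setup. The userspace sketch below only illustrates that checked-mapping pattern; map_header_sketch(), unmap_header_sketch() and the HeapHeader struct are hypothetical stand-ins, not the module's real map_header()/HeapMem_Header, which map physical heap headers into kernel virtual address space.

/*
 * Illustrative userspace sketch of the checked-mapping pattern applied in
 * this patch.  map_header_sketch(), unmap_header_sketch() and HeapHeader
 * are hypothetical stand-ins for cmemk.c's map_header()/unmap_header()/
 * HeapMem_Header; the real module maps physical heap headers into kernel
 * virtual memory instead of calling calloc().
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct HeapHeader {
    unsigned long next;  /* "physical address" of next free block, 0 = end of list */
    size_t size;         /* size of this free block */
} HeapHeader;

/*
 * Returns 0 on success and stores the "mapped" header in *hdr, or a
 * negative value on failure -- the same contract the patch gives map_header().
 */
static int map_header_sketch(HeapHeader **hdr, unsigned long phys)
{
    (void)phys;                       /* the real code would map this address */
    *hdr = calloc(1, sizeof(**hdr));  /* stand-in for the real mapping */
    return (*hdr == NULL) ? -1 : 0;
}

static void unmap_header_sketch(HeapHeader *hdr)
{
    free(hdr);                        /* stand-in for the real unmap */
}

/*
 * Walks a free list the way the patched allocator does: every mapping
 * attempt is checked, and a failure aborts the allocation by returning 0
 * instead of dereferencing an invalid header pointer.
 */
static unsigned long alloc_first_fit(unsigned long head_phys, size_t req_size)
{
    unsigned long cur_phys = head_phys;

    while (cur_phys != 0) {
        HeapHeader *cur;
        int ret_value = map_header_sketch(&cur, cur_phys);

        if (ret_value < 0)
            return 0;                 /* same bail-out as the patched alloc path */

        if (cur->size >= req_size) {
            unmap_header_sketch(cur);
            return cur_phys;          /* first block large enough */
        }

        cur_phys = cur->next;
        unmap_header_sketch(cur);
    }
    return 0;                         /* no suitable block found */
}

int main(void)
{
    /* A head address of 0 means an empty free list, so this prints 0. */
    printf("alloc -> %#lx\n", alloc_first_fit(0, 64));
    return 0;
}

The differing bail-out forms in the patch follow from the callers' signatures: the allocator's 0 already means "no suitable block", the free routine returns void so it can only abandon the coalescing work, and pool creation has a cleanup label (fail_after_create) to unwind what was already built.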