c5f79b7256a822de8cb7a201f6955b9fcde75ee3
1 /*
2 * Copyright (c) 2012-2013, Texas Instruments Incorporated
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * * Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 *
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * * Neither the name of Texas Instruments Incorporated nor the names of
17 * its contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
24 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
27 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
28 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32 /*
33 * ======== HeapMemMP.c ========
34 */
36 #include <xdc/std.h>
37 #include <xdc/runtime/Error.h>
38 #include <xdc/runtime/Assert.h>
39 #include <xdc/runtime/Memory.h>
40 #include <xdc/runtime/IHeap.h>
42 #include <ti/sysbios/hal/Cache.h>
44 #include <ti/sdo/ipc/_Ipc.h>
45 #include <ti/sdo/utils/_NameServer.h>
46 #include <ti/sdo/utils/_MultiProc.h>
47 #include <ti/sdo/ipc/heaps/_HeapMemMP.h>
48 #include <ti/sdo/ipc/_SharedRegion.h>
49 #include <ti/sdo/ipc/_GateMP.h>
51 #include "package/internal/HeapMemMP.xdc.h"
53 #ifdef __ti__
54 #pragma FUNC_EXT_CALLED(HeapMemMP_Params_init);
55 #pragma FUNC_EXT_CALLED(HeapMemMP_alloc);
56 #pragma FUNC_EXT_CALLED(HeapMemMP_close);
57 #pragma FUNC_EXT_CALLED(HeapMemMP_create);
58 #pragma FUNC_EXT_CALLED(HeapMemMP_delete);
59 #pragma FUNC_EXT_CALLED(HeapMemMP_free);
60 #pragma FUNC_EXT_CALLED(HeapMemMP_getExtendedStats);
61 #pragma FUNC_EXT_CALLED(HeapMemMP_getStats);
62 #pragma FUNC_EXT_CALLED(HeapMemMP_open);
63 #pragma FUNC_EXT_CALLED(HeapMemMP_openByAddr);
64 #pragma FUNC_EXT_CALLED(HeapMemMP_restore);
65 #pragma FUNC_EXT_CALLED(HeapMemMP_sharedMemReq);
66 #endif
68 /*
69 * ======== HeapMemMP_getSharedParams ========
70 */
71 static Void HeapMemMP_getSharedParams(HeapMemMP_Params *sparams,
72 const ti_sdo_ipc_heaps_HeapMemMP_Params *params)
73 {
74 sparams->gate = (GateMP_Handle)params->gate;
75 sparams->name = params->name;
76 sparams->regionId = params->regionId;
77 sparams->sharedAddr = params->sharedAddr;
78 sparams->sharedBufSize = params->sharedBufSize;
79 }
81 /*
82 * ======== HeapMemMP_getRTSCParams ========
83 */
84 static Void HeapMemMP_getRTSCParams(
85 ti_sdo_ipc_heaps_HeapMemMP_Params *params,
86 const HeapMemMP_Params *sparams)
87 {
88 ti_sdo_ipc_heaps_HeapMemMP_Params_init(params);
90 params->gate = (ti_sdo_ipc_GateMP_Handle)sparams->gate;
91 params->name = sparams->name;
92 params->regionId = sparams->regionId;
93 params->sharedAddr = sparams->sharedAddr;
94 params->sharedBufSize = sparams->sharedBufSize;
95 }
97 /*
98 *************************************************************************
99 * Common Header Functions
100 *************************************************************************
101 */
103 /*
104 * ======== HeapMemMP_Params_init ========
105 */
106 Void HeapMemMP_Params_init(HeapMemMP_Params *sparams)
107 {
108 ti_sdo_ipc_heaps_HeapMemMP_Params params;
110 ti_sdo_ipc_heaps_HeapMemMP_Params_init(¶ms);
111 HeapMemMP_getSharedParams(sparams, ¶ms);
112 }
114 /*
115 * ======== HeapMemMP_alloc ========
116 */
117 Ptr HeapMemMP_alloc(HeapMemMP_Handle handle, SizeT size, SizeT align)
118 {
119 Error_Block eb;
121 Error_init(&eb);
123 return (ti_sdo_ipc_heaps_HeapMemMP_alloc(
124 (ti_sdo_ipc_heaps_HeapMemMP_Handle)handle, size, align, &eb));
125 }
127 /*
128 * ======== HeapMemMP_create ========
129 */
130 HeapMemMP_Handle HeapMemMP_create(const HeapMemMP_Params *sparams)
131 {
132 ti_sdo_ipc_heaps_HeapMemMP_Params params;
133 ti_sdo_ipc_heaps_HeapMemMP_Object *obj;
134 Error_Block eb;
136 Error_init(&eb);
138 if (sparams != NULL) {
139 HeapMemMP_getRTSCParams(¶ms, (Ptr)sparams);
141 /* call the module create */
142 obj = ti_sdo_ipc_heaps_HeapMemMP_create(¶ms, &eb);
143 }
144 else {
145 obj = ti_sdo_ipc_heaps_HeapMemMP_create(NULL, &eb);
146 }
148 return ((HeapMemMP_Handle)obj);
149 }
151 /*
152 * ======== HeapMemMP_close ========
153 */
154 Int HeapMemMP_close(HeapMemMP_Handle *handlePtr)
155 {
156 HeapMemMP_delete(handlePtr);
158 return (HeapMemMP_S_SUCCESS);
159 }
161 /*
162 * ======== HeapMemMP_delete ========
163 */
164 Int HeapMemMP_delete(HeapMemMP_Handle *handlePtr)
165 {
166 ti_sdo_ipc_heaps_HeapMemMP_delete(
167 (ti_sdo_ipc_heaps_HeapMemMP_Handle *)handlePtr);
169 return (HeapMemMP_S_SUCCESS);
170 }
172 /*
173 * ======== HeapMemMP_free ========
174 */
175 Void HeapMemMP_free(HeapMemMP_Handle handle, Ptr addr, SizeT size)
176 {
177 ti_sdo_ipc_heaps_HeapMemMP_free(
178 (ti_sdo_ipc_heaps_HeapMemMP_Handle)handle, addr, size);
179 }
181 /*
182 * ======== HeapMemMP_getExtendedStats ========
183 */
184 Void HeapMemMP_getExtendedStats(HeapMemMP_Handle handle,
185 HeapMemMP_ExtendedStats *stats)
186 {
187 ti_sdo_ipc_heaps_HeapMemMP_Object *obj =
188 (ti_sdo_ipc_heaps_HeapMemMP_Object *)handle;
190 stats->buf = obj->buf;
191 stats->size = obj->bufSize;
192 }
194 /*
195 * ======== HeapMemMP_getStats ========
196 */
197 Void HeapMemMP_getStats(HeapMemMP_Handle handle, Ptr stats)
198 {
199 ti_sdo_ipc_heaps_HeapMemMP_getStats(
200 (ti_sdo_ipc_heaps_HeapMemMP_Handle)handle, (Memory_Stats *)stats);
201 }
203 /*
204 * ======== HeapMemMP_open ========
205 */
206 Int HeapMemMP_open(String name, HeapMemMP_Handle *handlePtr)
207 {
208 SharedRegion_SRPtr sharedShmBase;
209 Int status;
210 Ptr sharedAddr;
211 Error_Block eb;
213 Error_init(&eb);
215 /* Assert that a pointer has been supplied */
216 Assert_isTrue(handlePtr != NULL, ti_sdo_ipc_Ipc_A_nullArgument);
218 /* Assert that a name has been supplied */
219 Assert_isTrue(name != NULL, ti_sdo_ipc_Ipc_A_invParam);
221 /* Open by name */
222 status = NameServer_getUInt32(
223 (NameServer_Handle)HeapMemMP_module->nameServer, name,
224 &sharedShmBase, ti_sdo_utils_MultiProc_procIdList);
226 if (status < 0) {
227 /* Name not found. */
228 *handlePtr = NULL;
229 return (HeapMemMP_E_NOTFOUND);
230 }
232 sharedAddr = SharedRegion_getPtr(sharedShmBase);
234 status = HeapMemMP_openByAddr(sharedAddr, handlePtr);
236 return (status);
237 }
239 /*
240 * ======== HeapMemMP_openByAddr ========
241 */
Int HeapMemMP_openByAddr(Ptr sharedAddr,
        HeapMemMP_Handle *handlePtr)
{
    /* Open an existing HeapMemMP instance given the shared address of its
     * Attrs structure. Returns HeapMemMP_S_SUCCESS, HeapMemMP_E_NOTFOUND
     * (not yet created), or HeapMemMP_E_FAIL (create-for-open failed). */
    ti_sdo_ipc_heaps_HeapMemMP_Params params;
    ti_sdo_ipc_heaps_HeapMemMP_Attrs *attrs;
    Int status;
    Error_Block eb;

    Error_init(&eb);

    ti_sdo_ipc_heaps_HeapMemMP_Params_init(&params);

    /* Tell Instance_init() that we're opening */
    params.openFlag = TRUE;

    params.sharedAddr = sharedAddr;
    attrs = (ti_sdo_ipc_heaps_HeapMemMP_Attrs *)sharedAddr;

    /* Invalidate before reading attrs->status so we see the creator's write */
    if (SharedRegion_isCacheEnabled(SharedRegion_getId(sharedAddr))) {
        Cache_inv(attrs, sizeof(ti_sdo_ipc_heaps_HeapMemMP_Attrs),
                Cache_Type_ALL, TRUE);
    }

    if (attrs->status != ti_sdo_ipc_heaps_HeapMemMP_CREATED) {
        /* Creator has not finished postInit yet (or heap was deleted) */
        *handlePtr = NULL;
        status = HeapMemMP_E_NOTFOUND;
    }
    else {
        /* openFlag == TRUE makes this create attach rather than initialize */
        *handlePtr = (HeapMemMP_Handle)ti_sdo_ipc_heaps_HeapMemMP_create(
                &params, &eb);
        if (*handlePtr == NULL) {
            status = HeapMemMP_E_FAIL;
        }
        else {
            status = HeapMemMP_S_SUCCESS;
        }
    }

    return (status);
}
283 /*
284 * ======== HeapMemMP_restore ========
285 * The buffer should have the properly alignment at this
286 * point (either from instance$static$init in HeapMemMP.xs or
287 * from the above HeapMemMP_Instance_init).
288 */
Void HeapMemMP_restore(HeapMemMP_Handle handle)
{
    /* Reset the heap to its pristine state: one free block spanning the
     * entire buffer, linked from the dummy head header in shared Attrs. */
    ti_sdo_ipc_heaps_HeapMemMP_Object *obj =
            (ti_sdo_ipc_heaps_HeapMemMP_Object *)handle;

    ti_sdo_ipc_heaps_HeapMemMP_Header *begHeader;

    /*
     *  Fill in the top of the memory block
     *  next: pointer will be NULL (end of the list)
     *  size: size of this block
     *  NOTE: no need to Cache_inv because obj->attrs->bufPtr should be const
     */
    begHeader = (ti_sdo_ipc_heaps_HeapMemMP_Header *)obj->buf;
    begHeader->next = ti_sdo_ipc_SharedRegion_INVALIDSRPTR;
    begHeader->size = obj->bufSize;

    /* Head now points at the single free block covering the whole buffer */
    obj->attrs->head.next = obj->attrs->bufPtr;
    if (obj->cacheEnabled) {
        /* First wb is non-blocking; the second (wait=TRUE) flushes both */
        Cache_wbInv(&(obj->attrs->head),
                sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header), Cache_Type_ALL,
                FALSE);
        Cache_wbInv(begHeader, sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header),
                Cache_Type_ALL, TRUE);
    }
}
316 /*
317 * ======== HeapMemMP_sharedMemReq ========
318 */
SizeT HeapMemMP_sharedMemReq(const HeapMemMP_Params *params)
{
    /* Compute the total shared memory needed for an instance created with
     * 'params': the cache-aligned Attrs structure plus the buffer, with the
     * grand total rounded DOWN to a multiple of the minimum alignment. */
    SizeT memReq, minAlign;
    UInt16 regionId;

    /* Ensure that the sharedBufSize param has been set */
    Assert_isTrue(params->sharedBufSize != 0, ti_sdo_ipc_Ipc_A_invParam);

    /* Determine the shared region from either the id or the address */
    if (params->sharedAddr == NULL) {
        regionId = params->regionId;
    }
    else {
        regionId = SharedRegion_getId(params->sharedAddr);
    }

    Assert_isTrue(regionId != SharedRegion_INVALIDREGIONID,
            ti_sdo_ipc_Ipc_A_internal);

    /* minAlign is the larger of the header size and the cache line size */
    minAlign = sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header);
    if (SharedRegion_getCacheLineSize(regionId) > minAlign) {
        minAlign = SharedRegion_getCacheLineSize(regionId);
    }

    /* Add size of HeapMemMP Attrs */
    memReq = _Ipc_roundup(sizeof(ti_sdo_ipc_heaps_HeapMemMP_Attrs), minAlign);

    /* Add the buffer size */
    memReq += params->sharedBufSize;

    /* Make sure the size is a multiple of minAlign (round down) */
    memReq = (memReq / minAlign) * minAlign;

    return((SizeT)memReq);
}
354 /*
355 *************************************************************************
356 * Instance functions
357 *************************************************************************
358 */
360 /*
361 * ======== ti_sdo_ipc_heaps_HeapMemMP_Instance_init ========
362 */
Int ti_sdo_ipc_heaps_HeapMemMP_Instance_init(
        ti_sdo_ipc_heaps_HeapMemMP_Object *obj,
        const ti_sdo_ipc_heaps_HeapMemMP_Params *params,
        Error_Block *eb)
{
    /* Instance initialization. Two distinct paths:
     *  - open  (params->openFlag == TRUE): attach to shared state that a
     *    remote creator already initialized; returns 0 on success, 1 if
     *    the creator's GateMP cannot be opened.
     *  - create: set up the object locally, then postInit() publishes the
     *    shared state; returns 0 on success, 2 on postInit failure, 3 on
     *    NameServer failure. Nonzero codes are passed to Instance_finalize
     *    as 'status' for partial cleanup. */
    SharedRegion_SRPtr sharedShmBase;
    Ptr localAddr;
    Int status;

    /* Assert that sharedBufSize is sufficient (creators only) */
    Assert_isTrue(params->openFlag == TRUE ||
                  params->sharedBufSize != 0,
                  ti_sdo_ipc_Ipc_A_invParam);

    obj->nsKey = NULL;
    obj->allocSize = 0;

    if (params->openFlag == TRUE) {
        /* Opening: shared Attrs live at the supplied address */
        obj->attrs = (ti_sdo_ipc_heaps_HeapMemMP_Attrs *)
            params->sharedAddr;

        /* No need to Cache_inv- already done in openByAddr() */
        obj->buf = (Char *)SharedRegion_getPtr(
            obj->attrs->bufPtr);
        obj->bufSize = obj->attrs->head.size;
        obj->objType = ti_sdo_ipc_Ipc_ObjType_OPENDYNAMIC;
        obj->regionId = SharedRegion_getId(obj->buf);
        obj->cacheEnabled = SharedRegion_isCacheEnabled(obj->regionId);

        /* Set minAlign: max of header size and region cache line size */
        obj->minAlign = sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header);
        if (SharedRegion_getCacheLineSize(obj->regionId) > obj->minAlign) {
            obj->minAlign = SharedRegion_getCacheLineSize(obj->regionId);
        }

        /* Open the creator's gate via the address stored in shared Attrs */
        localAddr = SharedRegion_getPtr(obj->attrs->gateMPAddr);

        status = GateMP_openByAddr(localAddr, (GateMP_Handle *)&(obj->gate));
        if (status != GateMP_S_SUCCESS) {
            Error_raise(eb, ti_sdo_ipc_Ipc_E_internal, 0, 0);
            return(1);
        }

        return(0);
    }

    /* Creating the heap */
    if (params->gate != NULL) {
        obj->gate = params->gate;
    }
    else {
        /* If no gate specified, get the default system gate */
        obj->gate = (ti_sdo_ipc_GateMP_Handle)GateMP_getDefaultRemote();
    }

    obj->bufSize = params->sharedBufSize;

    if (params->sharedAddr == NULL) {
        /* Creating using a shared region ID */
        obj->objType = ti_sdo_ipc_Ipc_ObjType_CREATEDYNAMIC_REGION;
        obj->attrs = NULL; /* Will be alloc'ed in postInit */
        obj->regionId = params->regionId;
    }
    else {
        /* Creating using sharedAddr */
        obj->regionId = SharedRegion_getId(params->sharedAddr);

        /* Assert that the buffer is in a valid shared region */
        Assert_isTrue(obj->regionId != SharedRegion_INVALIDREGIONID,
                ti_sdo_ipc_Ipc_A_addrNotInSharedRegion);

        /* Assert that sharedAddr is cache aligned */
        Assert_isTrue(((UInt32)params->sharedAddr %
                SharedRegion_getCacheLineSize(obj->regionId) == 0),
                ti_sdo_ipc_Ipc_A_addrNotCacheAligned);

        obj->objType = ti_sdo_ipc_Ipc_ObjType_CREATEDYNAMIC;

        /* obj->buf will get alignment-adjusted in postInit */
        obj->buf = (Ptr)((UInt32)params->sharedAddr +
                sizeof(ti_sdo_ipc_heaps_HeapMemMP_Attrs));
        obj->attrs = (ti_sdo_ipc_heaps_HeapMemMP_Attrs *)params->sharedAddr;
    }

    obj->cacheEnabled = SharedRegion_isCacheEnabled(obj->regionId);

    /* Set minAlign: max of header size and region cache line size */
    obj->minAlign = sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header);
    if (SharedRegion_getCacheLineSize(obj->regionId) > obj->minAlign) {
        obj->minAlign = SharedRegion_getCacheLineSize(obj->regionId);
    }

    /* Allocate (if needed) and publish the shared state */
    HeapMemMP_postInit(obj, eb);
    if (Error_check(eb)) {
        return(2);
    }

    /* Add entry to NameServer */
    if (params->name != NULL) {
        /* We will store a shared pointer in the NameServer */
        sharedShmBase = SharedRegion_getSRPtr(obj->attrs,
                obj->regionId);
        obj->nsKey = NameServer_addUInt32((NameServer_Handle)
                HeapMemMP_module->nameServer, params->name,
                (UInt32)sharedShmBase);

        if (obj->nsKey == NULL) {
            /* NameServer_addUInt32 failed */
            Error_raise(eb, ti_sdo_ipc_Ipc_E_nameFailed, params->name, 0);
            return (3);
        }
    }

    return (0);
}
480 /*
481 * ======== ti_sdo_ipc_heaps_HeapMemMP_Instance_finalize ========
482 */
Void ti_sdo_ipc_heaps_HeapMemMP_Instance_finalize(
        ti_sdo_ipc_heaps_HeapMemMP_Object *obj, Int status)
{
    /* Tear down an instance. Creators remove the NameServer entry, mark
     * the shared Attrs 'not created', and free region-allocated memory;
     * openers simply close the gate they opened. Fields may be NULL when
     * finalize runs after a partial Instance_init failure. */
    if (obj->objType & (ti_sdo_ipc_Ipc_ObjType_CREATEDYNAMIC |
                        ti_sdo_ipc_Ipc_ObjType_CREATEDYNAMIC_REGION)) {
        /* Remove entry from NameServer */
        if (obj->nsKey != NULL) {
            NameServer_removeEntry((NameServer_Handle)
                    HeapMemMP_module->nameServer, obj->nsKey);
        }

        if (obj->attrs != NULL) {
            /* Set status to 'not created' */
            obj->attrs->status = 0;
            /* Write back so remote openers observe the cleared status */
            if (obj->cacheEnabled) {
                Cache_wbInv(obj->attrs,
                        sizeof(ti_sdo_ipc_heaps_HeapMemMP_Attrs),
                        Cache_Type_ALL, TRUE);
            }
        }

        /*
         *  Free the shared memory back to the region heap. If NULL, then the
         *  Memory_alloc failed.
         */
        if (obj->objType == ti_sdo_ipc_Ipc_ObjType_CREATEDYNAMIC_REGION
            && obj->attrs != NULL) {
            Memory_free(SharedRegion_getHeap(obj->regionId), obj->attrs,
                    obj->allocSize);
        }
    }
    else {
        /* Heap is being closed */
        /* Close the gate. If NULL, then GateMP_openByAddr failed. */
        if (obj->gate != NULL) {
            GateMP_close((GateMP_Handle *)&(obj->gate));
        }
    }
}
523 /*
524 * NOTE:
525 * Embedded within the code for HeapMemMP_alloc and HeapMemMP_free are comments
526 * that can be used to match a shared memory reference with its required
527 * cache call. This is done because the code for alloc and free is complex.
528 * These two-character comments indicate
529 * 1) The type of cache operation that is being performed {A, B}
530 * A = Cache_inv
531 * B = Cache_wbInv
532 * 2) A numerical id of the specific cache call that is performed.
533 * 1, 2, 3
534 * For example, the comment 'A2' indicates that the corresponding cache call
535 * is a Cache_inv operation identified by the number '2'
536 */
538 /*
539 * ======== ti_sdo_ipc_heaps_HeapMemMP_alloc ========
540 * HeapMemMP is implemented such that all of the memory and blocks it works
541 * with have an alignment that is a multiple of the minimum alignment and have
542 * a size which is a multiple of the minAlign. Maintaining this requirement
543 * throughout the implementation ensures that there are never any odd
544 * alignments or odd block sizes to deal with.
545 *
546 * Specifically:
547 * The buffer managed by HeapMemMP:
548 * 1. Is aligned on a multiple of obj->minAlign
549 * 2. Has an adjusted size that is a multiple of obj->minAlign
550 * All blocks on the freelist:
551 * 1. Are aligned on a multiple of obj->minAlign
552 * 2. Have a size that is a multiple of obj->minAlign
553 * All allocated blocks:
554 * 1. Are aligned on a multiple of obj->minAlign
555 * 2. Have a size that is a multiple of obj->minAlign
556 *
557 */
Ptr ti_sdo_ipc_heaps_HeapMemMP_alloc(ti_sdo_ipc_heaps_HeapMemMP_Object *obj,
        SizeT reqSize, SizeT reqAlign, Error_Block *eb)
{
    /* First-fit allocation from the shared free list, protected by the
     * instance GateMP. Cache operations are tagged A1/A2 (inv) and
     * B1..B4 (wbInv) per the NOTE above the alloc header comment.
     * Returns NULL and raises E_memory if no block is large enough. */
    IArg key;
    ti_sdo_ipc_heaps_HeapMemMP_Header *prevHeader, *newHeader, *curHeader;
    Char *allocAddr;
    Memory_Size curSize, adjSize;
    SizeT remainSize; /* free memory after allocated memory */
    SizeT adjAlign, offset;

    /* Assert that requested align is a power of 2 */
    Assert_isTrue((reqAlign & (reqAlign - 1)) == 0,
            ti_sdo_ipc_heaps_HeapMemMP_A_align);

    /* Assert that requested block size is non-zero */
    Assert_isTrue(reqSize != 0, ti_sdo_ipc_heaps_HeapMemMP_A_zeroBlock);

    adjSize = (Memory_Size)reqSize;

    /* Make size requested a multiple of obj->minAlign */
    if ((offset = (adjSize & (obj->minAlign - 1))) != 0) {
        adjSize = adjSize + (obj->minAlign - offset);
    }

    /*
     *  Make sure the alignment is at least as large as obj->minAlign
     *  Note: adjAlign must be a power of 2 (by function constraint) and
     *  obj->minAlign is also a power of 2,
     */
    adjAlign = reqAlign;
    if (adjAlign & (obj->minAlign - 1)) {
        /* adjAlign is less than obj->minAlign */
        adjAlign = obj->minAlign;
    }

    /* No need to Cache_inv Attrs- 'head' should be constant */
    prevHeader = &(obj->attrs->head);

    key = GateMP_enter((GateMP_Handle)obj->gate);

    /*
     *  The block will be allocated from curHeader. Maintain a pointer to
     *  prevHeader so prevHeader->next can be updated after the alloc.
     */
    if (obj->cacheEnabled) {
        Cache_inv(prevHeader, sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header),
                Cache_Type_ALL, TRUE); /* A1 */
    }
    curHeader = (ti_sdo_ipc_heaps_HeapMemMP_Header *)
            SharedRegion_getPtr(prevHeader->next); /* A1 */

    /* Loop over the free list. */
    while (curHeader != NULL) {
        /* Invalidate curHeader before reading its fields */
        if (obj->cacheEnabled) {
            Cache_inv(curHeader, sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header),
                    Cache_Type_ALL, TRUE); /* A2 */
        }

        curSize = curHeader->size;

        /*
         *  Determine the offset from the beginning to make sure
         *  the alignment request is honored.
         */
        offset = (Memory_Size)curHeader & (adjAlign - 1);
        if (offset) {
            offset = adjAlign - offset;
        }

        /* Internal Assert that offset is a multiple of obj->minAlign */
        Assert_isTrue(((offset & (obj->minAlign - 1)) == 0),
                ti_sdo_ipc_Ipc_A_internal);

        /* big enough? */
        if (curSize >= (adjSize + offset)) {

            /* Set the pointer that will be returned. Alloc from front */
            allocAddr = (Char *)((Memory_Size)curHeader + offset);

            /*
             *  Determine the remaining memory after the allocated block.
             *  Note: this cannot be negative because of above comparison.
             */
            remainSize = curSize - adjSize - offset;

            /* Internal Assert that remainSize is a multiple of obj->minAlign */
            Assert_isTrue(((remainSize & (obj->minAlign - 1)) == 0),
                    ti_sdo_ipc_Ipc_A_internal);

            /*
             *  If there is memory at the beginning (due to alignment
             *  requirements), maintain it in the list.
             *
             *  offset and remainSize must be multiples of
             *  sizeof(HeapMemMP_Header). Therefore the address of the newHeader
             *  below must be a multiple of the sizeof(HeapMemMP_Header), thus
             *  maintaining the requirement.
             */
            if (offset) {

                /* Adjust the curHeader size accordingly */
                curHeader->size = offset; /* B2 */
                /* Cache wb at end of this if block */

                /*
                 *  If there is remaining memory, add into the free list.
                 *  Note: no need to coalesce and we have HeapMemMP locked so
                 *  it is safe.
                 */
                if (remainSize) {
                    newHeader = (ti_sdo_ipc_heaps_HeapMemMP_Header *)
                            ((Memory_Size)allocAddr + adjSize);

                    /* curHeader has been inv at top of 'while' loop */
                    newHeader->next = curHeader->next; /* B1 */
                    newHeader->size = remainSize;      /* B1 */
                    if (obj->cacheEnabled) {
                        /* Writing back curHeader will cache-wait */
                        Cache_wbInv(newHeader,
                                sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header),
                                Cache_Type_ALL, FALSE); /* B1 */
                    }

                    curHeader->next = SharedRegion_getSRPtr(newHeader,
                            obj->regionId);
                }

                /* Write back (and invalidate) newHeader and curHeader */
                if (obj->cacheEnabled) {
                    /* B2 */
                    Cache_wbInv(curHeader,
                            sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header),
                            Cache_Type_ALL, TRUE);
                }
            }
            else {
                /*
                 *  If there is any remaining, link it in,
                 *  else point to the next free block.
                 *  Note: no need to coalesce and we have HeapMemMP locked so
                 *  it is safe.
                 */
                if (remainSize) {
                    newHeader = (ti_sdo_ipc_heaps_HeapMemMP_Header *)
                            ((Memory_Size)allocAddr + adjSize);

                    newHeader->next = curHeader->next; /* A2, B3 */
                    newHeader->size = remainSize;      /* B3 */

                    if (obj->cacheEnabled) {
                        /* Writing back prevHeader will cache-wait */
                        Cache_wbInv(newHeader,
                                sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header),
                                Cache_Type_ALL, FALSE); /* B3 */
                    }

                    /* B4 */
                    prevHeader->next = SharedRegion_getSRPtr(newHeader,
                            obj->regionId);
                }
                else {
                    /* curHeader has been inv at top of 'while' loop */
                    prevHeader->next = curHeader->next; /* A2, B4 */
                }

                if (obj->cacheEnabled) {
                    /* B4 */
                    Cache_wbInv(prevHeader,
                            sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header),
                            Cache_Type_ALL, TRUE);
                }
            }

            GateMP_leave((GateMP_Handle)obj->gate, key);

            /* Success, return the allocated memory */
            return ((Ptr)allocAddr);
        }
        else {
            /* Block too small: advance to the next free block */
            prevHeader = curHeader;
            curHeader = SharedRegion_getPtr(curHeader->next);
        }
    }

    GateMP_leave((GateMP_Handle)obj->gate, key);

    /* No free block could satisfy the request */
    Error_raise(eb, ti_sdo_ipc_heaps_HeapMemMP_E_memory, (IArg)obj,
            (IArg)reqSize);

    return (NULL);
}
749 /*
750 * ======== ti_sdo_ipc_heaps_HeapMemMP_free ========
751 */
Void ti_sdo_ipc_heaps_HeapMemMP_free(ti_sdo_ipc_heaps_HeapMemMP_Object *obj,
        Ptr addr, SizeT size)
{
    /* Return a block to the address-ordered free list, coalescing with the
     * adjacent upper and/or lower free blocks when contiguous. Protected by
     * the instance GateMP; A/B comment tags match the cache-call NOTE above
     * the alloc function. */
    IArg key;
    ti_sdo_ipc_heaps_HeapMemMP_Header *curHeader, *newHeader, *nextHeader;
    SizeT offset;

    /* Assert that 'addr' is cache aligned */
    Assert_isTrue(((UInt32)addr % obj->minAlign == 0),
            ti_sdo_ipc_Ipc_A_addrNotCacheAligned);

    /* Restore size to actual allocated size (round up to minAlign) */
    offset = size & (obj->minAlign - 1);
    if (offset != 0) {
        size += obj->minAlign - offset;
    }

    newHeader = (ti_sdo_ipc_heaps_HeapMemMP_Header *)addr;

    /*
     *  Invalidate entire buffer being freed to ensure that stale cache
     *  data in block isn't evicted later
     */
    if (obj->cacheEnabled) {
        Cache_inv(newHeader, size, Cache_Type_ALL, FALSE);
    }

    /*
     *  obj->attrs never changes, doesn't need Gate protection
     *  and Cache invalidate
     */
    curHeader = &(obj->attrs->head);

    key = GateMP_enter((GateMP_Handle)obj->gate);

    if (obj->cacheEnabled) {
        /* A1 */
        Cache_inv(curHeader, sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header),
                Cache_Type_ALL, TRUE);
    }

    nextHeader = SharedRegion_getPtr(curHeader->next);

    /* Make sure the entire buffer is in the range of the heap. */
    Assert_isTrue((((SizeT)newHeader >= (SizeT)obj->buf) &&
            ((SizeT)newHeader + size <=
            (SizeT)obj->buf + obj->bufSize)),
            ti_sdo_ipc_heaps_HeapMemMP_A_invalidFree);

    /* Go down freelist and find right place for buf */
    while (nextHeader != NULL && nextHeader < newHeader) {
        if (obj->cacheEnabled) {
            Cache_inv(nextHeader, sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header),
                    Cache_Type_ALL, FALSE); /* A2 */
        }

        /* Make sure the addr is not in this free block */
        Assert_isTrue((SizeT)newHeader >= (SizeT)nextHeader + nextHeader->size,
                ti_sdo_ipc_heaps_HeapMemMP_A_invalidFree); /* A2 */

        curHeader = nextHeader;
        /* A2 */
        nextHeader = SharedRegion_getPtr(nextHeader->next);
    }

    /* Link the freed block in between curHeader and nextHeader */
    /* B2 */
    newHeader->next = SharedRegion_getSRPtr(nextHeader, obj->regionId);
    newHeader->size = size;

    /* B1, A1 */
    curHeader->next = SharedRegion_getSRPtr(newHeader, obj->regionId);

    /* Join contiguous free blocks */
    if (nextHeader != NULL) {
        /*
         *  Verify the free size is not overlapping. Not all cases are
         *  detectable, but it is worth a shot. Note: only do this
         *  assert if nextHeader is non-NULL.
         */
        Assert_isTrue(((SizeT)newHeader + size) <= (SizeT)nextHeader,
                ti_sdo_ipc_heaps_HeapMemMP_A_invalidFree);

        /* Join with upper block */
        if (((Memory_Size)newHeader + size) == (Memory_Size)nextHeader) {
            if (obj->cacheEnabled) {
                Cache_inv(nextHeader, sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header),
                        Cache_Type_ALL, TRUE);
            }
            newHeader->next = nextHeader->next;  /* A2, B2 */
            newHeader->size += nextHeader->size; /* A2, B2 */

            /* NOTE(review): 'size' is not read again after this point, so
             * this increment appears to have no effect — confirm intent */
            size += obj->minAlign;

            /* Don't Cache_wbInv, this will be done later */
        }
    }

    /*
     *  Join with lower block. Make sure to check to see if not the
     *  first block. No need to invalidate attrs since head shouldn't change.
     */
    if ((curHeader != &obj->attrs->head) &&
        ((Memory_Size)curHeader + curHeader->size == (Memory_Size)newHeader)) {
        /*
         *  Don't Cache_inv newHeader since newHeader has data that
         *  hasn't been written back yet (B2)
         */
        curHeader->next = newHeader->next;  /* B1, B2 */
        curHeader->size += newHeader->size; /* B1, B2 */
    }

    if (obj->cacheEnabled) {
        Cache_wbInv(curHeader, sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header),
                Cache_Type_ALL, FALSE); /* B1 */
        /*
         *  writeback invalidate the new header
         */
        Cache_wbInv(newHeader, sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header),
                Cache_Type_ALL, TRUE); /* B2 */
    }

    GateMP_leave((GateMP_Handle)obj->gate, key);
}
874 /*
875 * ======== HeapMemMP_isBlocking ========
876 */
877 Bool ti_sdo_ipc_heaps_HeapMemMP_isBlocking(
878 ti_sdo_ipc_heaps_HeapMemMP_Object *obj)
879 {
880 Bool flag = FALSE;
882 // TODO figure out how to determine whether the gate is blocking...
883 return (flag);
884 }
886 /*
887 * ======== HeapMemMP_getStats ========
888 */
Void ti_sdo_ipc_heaps_HeapMemMP_getStats(ti_sdo_ipc_heaps_HeapMemMP_Object *obj,
        Memory_Stats *stats)
{
    /* Fill in total size plus total and largest free-block sizes by
     * walking the shared free list while holding the instance gate. */
    IArg key;
    ti_sdo_ipc_heaps_HeapMemMP_Header *curHeader;

    stats->totalSize = obj->bufSize;
    stats->totalFreeSize = 0;   /* determined later */
    stats->largestFreeSize = 0; /* determined later */

    key = GateMP_enter((GateMP_Handle)obj->gate);

    /* Invalidate the dummy head header before reading its next pointer */
    if (obj->cacheEnabled) {
        Cache_inv(&(obj->attrs->head),
                sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header), Cache_Type_ALL,
                TRUE);
    }

    curHeader = SharedRegion_getPtr(obj->attrs->head.next);

    while (curHeader != NULL) {
        /* Invalidate curHeader before reading its size/next fields */
        if (obj->cacheEnabled) {
            Cache_inv(curHeader, sizeof(ti_sdo_ipc_heaps_HeapMemMP_Header),
                    Cache_Type_ALL, TRUE);
        }
        stats->totalFreeSize += curHeader->size;
        if (stats->largestFreeSize < curHeader->size) {
            stats->largestFreeSize = curHeader->size;
        }
        curHeader = SharedRegion_getPtr(curHeader->next);
    }

    GateMP_leave((GateMP_Handle)obj->gate, key);
}
925 /*
926 *************************************************************************
927 * Internal functions
928 *************************************************************************
929 */
931 /*
932 * ======== ti_sdo_ipc_heaps_HeapMemMP_postInit ========
933 */
Void ti_sdo_ipc_heaps_HeapMemMP_postInit(ti_sdo_ipc_heaps_HeapMemMP_Object *obj,
        Error_Block *eb)
{
    /* Finish creation: allocate shared memory from the region heap when
     * obj->attrs is NULL (region-id create path), then initialize the
     * shared Attrs, lay down the initial free-list header, and finally
     * publish status = CREATED so remote openers can attach. On allocation
     * failure, returns with 'eb' set by Memory_alloc. */
    HeapMemMP_Params params;
    IHeap_Handle regionHeap;

    if (obj->attrs == NULL) {
        /* Need to allocate from the heap */
        HeapMemMP_Params_init(&params);
        params.regionId = obj->regionId;
        params.sharedBufSize = obj->bufSize;
        obj->allocSize = HeapMemMP_sharedMemReq(&params);

        regionHeap = SharedRegion_getHeap(obj->regionId);
        Assert_isTrue(regionHeap != NULL, ti_sdo_ipc_SharedRegion_A_noHeap);
        obj->attrs = Memory_alloc(regionHeap,
                obj->allocSize,
                obj->minAlign, eb);
        if (obj->attrs == NULL) {
            return;
        }

        /* Buffer immediately follows the Attrs structure */
        obj->buf = (Ptr)((UInt32)obj->attrs +
                sizeof(ti_sdo_ipc_heaps_HeapMemMP_Attrs));
    }

    /* Round obj->buf up by obj->minAlign */
    obj->buf = (Ptr)_Ipc_roundup(obj->buf, obj->minAlign);

    /* Verify the buffer is large enough */
    Assert_isTrue((obj->bufSize >=
            SharedRegion_getCacheLineSize(obj->regionId)),
            ti_sdo_ipc_heaps_HeapMemMP_A_heapSize);

    /* Make sure the size is a multiple of obj->minAlign */
    obj->bufSize = (obj->bufSize / obj->minAlign) * obj->minAlign;

    /* Record gate address and buffer SRPtr for remote openers */
    obj->attrs->gateMPAddr = ti_sdo_ipc_GateMP_getSharedAddr(obj->gate);
    obj->attrs->bufPtr = SharedRegion_getSRPtr(obj->buf, obj->regionId);

    /* Store computed obj->bufSize in shared mem */
    obj->attrs->head.size = obj->bufSize;

    /* Place the initial header */
    HeapMemMP_restore((HeapMemMP_Handle)obj);

    /* Last thing, set the status */
    obj->attrs->status = ti_sdo_ipc_heaps_HeapMemMP_CREATED;

    /* Write back Attrs so remote processors see status = CREATED */
    if (obj->cacheEnabled) {
        Cache_wbInv(obj->attrs, sizeof(ti_sdo_ipc_heaps_HeapMemMP_Attrs),
                Cache_Type_ALL, TRUE);
    }

}