329cf9682f58a26f9e359c2fc3d0b90cceabf28b
1 /******************************************************************************
2 * FILE PURPOSE: Functions to OSAL related routines for running NWAL, PA, QMSS,etc
3 ******************************************************************************
4 * FILE NAME: osal.c
5 *
6 * DESCRIPTION: Functions to initialize framework resources for running NWAL
7 *
8 * REVISION HISTORY:
9 *
10 * Copyright (c) Texas Instruments Incorporated 2010-2011
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 *
19 * Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the
22 * distribution.
23 *
24 * Neither the name of Texas Instruments Incorporated nor the names of
25 * its contributors may be used to endorse or promote products derived
26 * from this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 */
42 /* CSL RL includes */
43 #include <ti/csl/cslr_device.h>
44 #include <ti/csl/csl_pscAux.h>
45 #include <ti/csl/csl_semAux.h>
46 #include <ti/csl/csl_cacheAux.h>
47 #include <ti/csl/csl_xmcAux.h>
49 #include <stdlib.h>
50 #include <stdio.h>
52 #include "netapi_tune.h"
53 #include "netapi_vm.h"
54 #include "netapi_timer.h"
55 #include <unistd.h>
56 #include <sys/mman.h>
57 #include <sys/types.h>
58 #include <sys/stat.h>
59 #include <sys/ioctl.h>
60 #include <fcntl.h>
61 #include <errno.h>
62 #include <string.h>
63 #include "netapi_util.h"
64 #include "tools/module/netapimod.h"
/* Map the RTSC/SysBIOS-style System_printf onto stdio printf for Linux user space */
#define System_printf printf

/* Allocation/free counters maintained by the QMSS and CPPI OSAL
 * malloc/free hooks (Osal_qmssMalloc/Free, Osal_cppiMalloc/Free);
 * a malloc/free count mismatch indicates a leak. */
uint32_t Osal_qmss_MallocCounter =0;
uint32_t Osal_qmss_FreeCounter =0;
uint32_t Osal_cppi_MallocCounter =0;
uint32_t Osal_cppi_FreeCounter =0;

/* Forward declaration: virtual->physical translation for SA security contexts */
void* Osal_saGetSCPhyAddr(void* vaddr);

/* TODO: core number hard-coded to 0 — single-core user-space build assumed */
#define DNUM 0
#if 0  /* Disabled: pthread-based critical-section helpers, kept for reference */
uint32_t globalCritkey;

/* Lock to be used for critical section */
pthread_mutex_t mutex_lock;

/* One-time OSAL init: create the mutex used by the CS helpers below. */
void nwalTest_osalInit()
{
pthread_mutex_init(&mutex_lock, NULL);
return;
}

/* OSAL teardown: destroy the CS mutex. */
void nwalTest_osalshutdown()
{
pthread_mutex_destroy(&mutex_lock);
return;
}

/* Enter critical section (actual locking compiled out even here). */
static inline void nwalTest_osalEnterCS()
{
#if 0
pthread_mutex_lock(&mutex_lock);
#endif
return;
}

/* Leave critical section (actual unlocking compiled out even here). */
static inline void nwalTest_osalLeaveCS()
{
#if 0
pthread_mutex_unlock(&mutex_lock);
#endif
return;
}
#endif  /* 0 */
/********** USER-SPACE ACCESS TO KERNEL MEMORY SERVICES *************/

/* Descriptor for the NETAPI kernel-module device node; valid only after
 * a successful netapi_utilModInit(). */
static int netapi_fd;

/* init: open the NETAPI kernel module device.
 * Returns the open file descriptor on success, -1 on failure. */
int netapi_utilModInit(void)
{
    netapi_fd = open("/dev/netapi", O_RDWR);
    return (netapi_fd == -1) ? -1 : netapi_fd;
}
132 /***close **/
133 void netapi_utilModClose(void)
134 {
135 close(netapi_fd);
136 }
138 /* return physical address of region kernel module has allocated for us */
139 unsigned long netapi_utilGetPhysOfBufferArea(void)
140 {
141 unsigned long physp;
143 if (ioctl(netapi_fd, NETAPIMOD_IOCGETPHYS | NETAPIMOD_IOCMAGIC, &physp) == -1) {
144 return 0;
145 }
146 return physp;
147 }
149 /* return the size of that region */
150 unsigned long netapi_utilGetSizeOfBufferArea(void)
151 {
152 unsigned long size;
154 if (ioctl(netapi_fd, NETAPIMOD_IOCGETSIZE | NETAPIMOD_IOCMAGIC, &size) == -1) {
155 return 0;
156 }
157 return size;
158 }
160 //*****for the actual wb, inv cache ops, call the osal_xxx version, not these directly
161 // (so make inline)
162 /** write back operation on block */
163 static inline int _netapi_utilCacheWb(void *ptr, size_t size)
164 {
165 struct netapimod_block block;
167 block.addr = (unsigned long)ptr;
168 block.size = size;
170 if (ioctl(netapi_fd, NETAPIMOD_IOCCACHEWB | NETAPIMOD_IOCMAGIC, &block) == -1) {
171 return -1;
172 }
173 return 0;
174 }
175 int netapi_utilCacheWb(void *ptr, size_t size) {return _netapi_utilCacheWb(ptr,size);}
177 /** write back & invalidate **/
178 static inline int _netapi_utilCacheWbInv(void *ptr, size_t size)
179 {
180 struct netapimod_block block;
182 block.addr = (unsigned long)ptr;
183 block.size = size;
185 if (ioctl(netapi_fd, NETAPIMOD_IOCCACHEWBINV | NETAPIMOD_IOCMAGIC, &block) == -1) {
186 return -1;
187 }
188 return 0;
189 }
191 int netapi_utilCacheWbInv(void *ptr, size_t size) {return _netapi_utilCacheWbInv(ptr,size);}
192 /** just invalidate **/
193 static inline int _netapi_utilCacheInv(void *ptr, size_t size)
194 {
195 struct netapimod_block block;
197 block.addr = (unsigned long)ptr;
198 block.size = size;
200 if (ioctl(netapi_fd, NETAPIMOD_IOCCACHEINV | NETAPIMOD_IOCMAGIC, &block) == -1) {
201 return -1;
202 }
203 return 0;
204 }
205 int netapi_utilCacheInv(void *ptr, size_t size) {return _netapi_utilCacheInv(ptr,size);}
207 //***mmap the block into our user space process memory map. */
208 unsigned long netapi_utilGetVaOfBufferArea(unsigned int offset, unsigned int size)
209 {
210 void *userp;
212 /* Map the physical address to user space */
213 userp = mmap(0, // Preferred start address
214 size, // Length to be mapped
215 PROT_WRITE | PROT_READ, // Read and write access
216 MAP_SHARED, // Shared memory
217 netapi_fd, // File descriptor
218 offset); // The byte offset from fd
220 if (userp == MAP_FAILED) {
221 return 0;
222 }
223 return (unsigned long)userp;
224 }
/* Accumulated CPU cycles spent inside user-space cache maintenance ops */
static unsigned int cache_op_cycles = 0;
/* Number of cache maintenance operations measured so far */
static unsigned int n_cache_op_cycles = 0;

/* Clear both cache-op measurement counters. */
void Osal_cache_op_measure_reset(void)
{
    cache_op_cycles = 0;
    n_cache_op_cycles = 0;
}
/*****************************************************************************
 * FUNCTION PURPOSE: Cache Invalidation Routine
 *****************************************************************************
 * DESCRIPTION: Invalidates 'size' bytes at 'blockPtr' so the CPU re-reads
 * them from RAM. Compiles to a no-op unless NETAPI_TUNE_USE_CACHE_OPS is
 * defined — presumably the buffer region is kept coherent by other means
 * in that build (TODO confirm). When active, also accounts the cycles
 * spent, reported via Osal_cache_op_measure().
 *****************************************************************************/
static inline void Osal_invalidateCache (void *blockPtr, uint32_t size)
{
#ifdef NETAPI_TUNE_USE_CACHE_OPS
register unsigned int v1;
register unsigned int v2;
/* timestamp before the operation */
v1= netapi_timing_stop();
/* only blocks inside the NETAPI-managed VM region are maintained;
 * anything outside is silently skipped */
if (((uint8_t*)blockPtr <netapi_VM_mem_start)||( (uint8_t*)blockPtr>netapi_VM_mem_end)) return;
_netapi_utilCacheInv(blockPtr, size);
v2= netapi_timing_stop();
/* accumulate cycle cost and op count for profiling */
cache_op_cycles += (v2-v1);
n_cache_op_cycles+=1;
#endif
return;
}
/*****************************************************************************
 * FUNCTION PURPOSE: Cache Writeback Routine
 *****************************************************************************
 * DESCRIPTION: Cache Writeback (with invalidate) Routine
 *****************************************************************************/
256 /* stats */
258 unsigned int Osal_cache_op_measure(int * p_n) { *p_n = n_cache_op_cycles; return cache_op_cycles;}
/* Write back 'size' bytes at 'blockPtr' to RAM. NOTE: despite the name,
 * this calls _netapi_utilCacheWbInv, i.e. it performs write-back AND
 * invalidate. No-op unless NETAPI_TUNE_USE_CACHE_OPS is defined.
 * Cycles spent are accounted for Osal_cache_op_measure(). */
static inline void Osal_writeBackCache (void *blockPtr, uint32_t size)
{
#ifdef NETAPI_TUNE_USE_CACHE_OPS
register unsigned int v1;
register unsigned int v2;
/* timestamp before the operation */
v1= netapi_timing_stop();
/* only blocks inside the NETAPI-managed VM region are maintained */
if (((uint8_t*)blockPtr <netapi_VM_mem_start)||( (uint8_t*)blockPtr>netapi_VM_mem_end)) return;
_netapi_utilCacheWbInv(blockPtr, size);
v2= netapi_timing_stop();
/* accumulate cycle cost and op count for profiling */
cache_op_cycles += (v2-v1);
n_cache_op_cycles+=1;
#endif
return;
}
/* QMSS multicore/multithread critical-section entry.
 * Stub: no multi-process protection yet (pending kernel infrastructure),
 * so no lock is taken and no key is produced. */
void * Osal_qmssMtCsEnter()
{
    return NULL;
}

/* QMSS multicore/multithread critical-section exit.
 * Stub: nothing was locked, so there is nothing to release. */
void Osal_qmssMtCsExit(void *key)
{
    (void)key; /* unused in the stub */
}
/* NWAL critical-section entry. Stub pending multi-process support
 * from the kernel; 'key' is left untouched. */
void Osal_nwalCsEnter(uint32_t *key)
{
    (void)key;
}

/* NWAL critical-section exit. Stub: no lock to release. */
void Osal_nwalCsExit(uint32_t key)
{
    (void)key;
}
311 void Osal_qmssLog ( String fmt, ... )
312 {
313 }
/* CPPI critical-section entry. Stub pending multi-process support
 * from the kernel; 'key' is left untouched. */
void Osal_cppiCsEnter (uint32_t *key)
{
    (void)key;
}

/* CPPI critical-section exit. Stub: no lock to release. */
void Osal_cppiCsExit (uint32_t key)
{
    (void)key;
}
334 void Osal_cppiLog ( String fmt, ... )
335 {
336 }
338 void Osal_paBeginMemAccess (Ptr addr, uint32_t size)
339 {
340 /* Stub Function. TBD: Would need to handle when for multi proc access
341 * To be handled once infrastructure is available from Kernel
342 */
344 }
346 void Osal_paEndMemAccess (Ptr addr, uint32_t size)
347 {
348 /* Stub Function. TBD: Would need to handle when for multi proc access
349 * To be handled once infrastructure is available from Kernel
350 */
351 }
/* PA multicore critical-section entry. Stub pending kernel support. */
void Osal_paMtCsEnter (uint32_t *key)
{
    (void)key;
}

/* PA multicore critical-section exit. Stub: no lock to release. */
void Osal_paMtCsExit (uint32_t key)
{
    (void)key;
}
/* QMSS critical-section entry. Stub pending kernel support:
 * no lock is taken, so the returned key is NULL. */
void* Osal_qmssCsEnter ()
{
    return NULL;
}

/* QMSS critical-section exit. Stub: nothing to release. */
void Osal_qmssCsExit (void * key)
{
    (void)key;
}
383 Ptr Osal_qmssMalloc (uint32_t num_bytes)
384 {
385 Ptr ret;
387 Osal_qmss_MallocCounter++;
388 ret = malloc (num_bytes);
389 if(ret==NULL)
390 {
391 System_printf("\nERROR! QMSS Malloc failed!\n");
392 }
394 return ret;
395 }
397 void Osal_qmssFree (Ptr ptr, uint32_t size)
398 {
399 /* Increment the free counter. */
400 Osal_qmss_FreeCounter++;
401 free(ptr);
402 }
404 Ptr Osal_cppiMalloc (uint32_t num_bytes)
405 {
406 Ptr ret;
408 Osal_cppi_MallocCounter++;
409 num_bytes += (CACHE_L2_LINESIZE-1);
410 ret = malloc (num_bytes);
412 if(ret==NULL)
413 {
414 System_printf("\nERROR! CPPI Malloc failed!\n");
415 }
417 return ret;
418 }
420 void Osal_cppiFree (Ptr ptr, uint32_t size)
421 {
422 /* Increment the free counter. */
423 Osal_cppi_FreeCounter++;
424 free(ptr);
425 }
/* QMSS begin-mem-access hook. Cache invalidate is intentionally disabled
 * here — presumably the descriptor region is coherent in this build
 * (TODO confirm against netapi_tune settings). */
void Osal_qmssBeginMemAccess (void *blockPtr, uint32_t size)
{
    (void)blockPtr;
    (void)size;
}

/* QMSS end-mem-access hook. Write-back intentionally disabled here. */
void Osal_qmssEndMemAccess (void *blockPtr, uint32_t size)
{
    (void)blockPtr;
    (void)size;
}

/* CPPI begin-mem-access hook. Cache invalidate intentionally disabled. */
void Osal_cppiBeginMemAccess (void *blockPtr, uint32_t size)
{
    (void)blockPtr;
    (void)size;
}

/* CPPI end-mem-access hook. Write-back intentionally disabled. */
void Osal_cppiEndMemAccess (void *blockPtr, uint32_t size)
{
    (void)blockPtr;
    (void)size;
}
/* NWAL cache-invalidate hook. Intentionally disabled in this build. */
void Osal_nwalInvalidateCache (void *blockPtr, uint32_t size)
{
    (void)blockPtr;
    (void)size;
}

/* NWAL cache write-back hook. Intentionally disabled in this build. */
void Osal_nwalWriteBackCache (void *blockPtr, uint32_t size)
{
    (void)blockPtr;
    (void)size;
}
463 uint32_t Osal_nwalGetCacheLineSize (void )
464 {
465 /* By default assumes L2 cache line is enabled. If not return CACHE_L1D_LINESIZE */
466 return (CACHE_L2_LINESIZE);
467 }
/********************************************************************
 * FUNCTION PURPOSE: Convert local address to global
 ********************************************************************
 * DESCRIPTION: Identity mapping — Linux user space has no per-core
 * local addressing, so the address is returned unchanged.
 ********************************************************************/
unsigned int Osal_nwalLocToGlobAddr(unsigned int x)
{
    return x;
}
480 uint16_t Osal_nwalGetProcId (void )
481 {
482 return DNUM;
483 }
484 uint64_t Osal_nwalGetTimeStamp(void)
485 {
486 /* Stub function to return timestamp
487 */
488 return netapi_getTimestamp();
489 }
/* Processor id for the SA LLD; fixed at 0 in this build. */
uint16_t Osal_saGetProcId (void )
{
    return 0;
}
496 void* Osal_saGetSCPhyAddr(void* vaddr)
497 {
498 if(vaddr == NULL)
499 {
500 return NULL;
501 }
502 return (void *)(netapi_VM_mem_start_phy + ((uint8_t*) vaddr - netapi_VM_mem_start));
504 }
/* Begin CPU access to a security context: invalidate stale cache lines
 * so the CPU reads current data. */
void Osal_saBeginScAccess (void* addr, uint32_t size)
{
    Osal_invalidateCache(addr, size);
}

/* End CPU access to a security context: push CPU writes out to memory. */
void Osal_saEndScAccess (void* addr, uint32_t size)
{
    Osal_writeBackCache(addr, size);
}
/* SA critical-section entry. Stub pending multi-process support from
 * the kernel; a hardware semaphore (CSL_semAcquireDirect on SA_HW_SEM)
 * would be used here once available. */
void Osal_saCsEnter (uint32_t *key)
{
    (void)key;
}

/* SA critical-section exit. Stub: no semaphore to release. */
void Osal_saCsExit (uint32_t key)
{
    (void)key;
}
/* SA multicore critical-section entry. Stub pending kernel support. */
void Osal_saMtCsEnter (uint32_t *key)
{
    (void)key;
}

/* SA multicore critical-section exit. Stub: no lock to release. */
void Osal_saMtCsExit (uint32_t key)
{
    (void)key;
}
/* Begin CPU access to SA-shared memory: invalidate stale cache lines. */
void Osal_saBeginMemAccess (void *blockPtr, uint32_t size)
{
    Osal_invalidateCache(blockPtr, size);
}

/* End CPU access to SA-shared memory: write modified lines back. */
void Osal_saEndMemAccess (void *blockPtr, uint32_t size)
{
    Osal_writeBackCache(blockPtr, size);
}
/* PKTLIB begin-mem-access hook. Cache invalidate intentionally disabled
 * in this build. */
void Osal_pktLibBeginMemAccess(void* ptr, uint32_t size)
{
    (void)ptr;
    (void)size;
}

/* PKTLIB end-mem-access hook. Write-back intentionally disabled. */
void Osal_pktLibEndMemAccess(void* ptr, uint32_t size)
{
    (void)ptr;
    (void)size;
}
579 void Osal_pktLibBeginPktAccess(Pktlib_HeapHandle heapHandle, Ti_Pkt* ptrPkt, uint32_t size)
580 {
581 Osal_invalidateCache(ptrPkt,size);
582 }
585 void Osal_pktLibEndPktAccess(Pktlib_HeapHandle heapHandle, Ti_Pkt* ptrPkt, uint32_t size)
586 {
588 /* Cache Write back for the packet. Currently being disabled as it will be done during
589 * QMSS Push operation
591 Osal_writeBackCache((void *)ptrPkt,size);
592 */
593 }
596 void* Osal_pktLibEnterCriticalSection(Pktlib_HeapHandle heapHandle)
597 {
598 /* TODO: We should use the 'heapHandle' and compare it with what we got from the
599 * 'create/find' HEAP API & depending upon the comparison take appropriate action.
600 * Implementations here could range from a MULTI-THREAD protection if the packets in
601 * the heap are being accessed across multiple threads or MULTI-CORE if the packets
602 * are being accessed across multiple cores and features: split and clone are used.
603 * For NWAL layer no protection required.
604 *
605 * For testing we are not doing any of this so we are simply setting it to NOOP */
606 return NULL;
607 }
610 void Osal_pktLibExitCriticalSection(Pktlib_HeapHandle heapHandle, void* csHandle)
611 {
612 /* TODO: We should use the 'heapHandle' and compare it with what we got from the
613 * 'create/find' HEAP API & depending upon the comparison take appropriate action.
614 * Implementations here could range from a MULTI-THREAD protection if the packets in
615 * the heap are being accessed across multiple threads or MULTI-CORE if the packets
616 * are being accessed across multiple cores and features: split and clone are used.
617 * For NWAL layer no protection required..
618 *
619 * For testing we are not doing any of this so we are simply setting it to NOOP */
620 return;
621 }
/* PKTLIB physical-to-virtual translation; delegates to the QMSS helper. */
void* Osal_pktLibPhyToVirt(void *ptr)
{
    return _Osal_qmssPhyToVirt(ptr);
}

/* QMSS virtual-to-physical translation. */
void* Osal_qmssVirtToPhy (void *ptr)
{
    return _Osal_qmssVirtToPhy(ptr);
}

/* QMSS physical-to-virtual translation. */
void * Osal_qmssPhyToVirt (void *ptr)
{
    return _Osal_qmssPhyToVirt(ptr);
}
/******************************************************************************
 * Function to traverse a CPPI descriptor and convert all address references
 * from virtual to physical.
 *
 * Walks the host-descriptor chain, translating buffPtr, origBuffPtr and
 * nextBDPtr to physical addresses and writing each buffer/descriptor back
 * to memory so the DMA hardware sees the updates. Returns the physical
 * address of the first descriptor, or 0 (NULL) if any translation fails.
 * NOTE(review): on a mid-chain failure, links already visited have been
 * rewritten to physical addresses — the chain is left half-converted.
 ******************************************************************************/
//#define ASSUME_ALL_DESCRIPTOR //define this if mono and host descriptors are present, else don't
//define and just host will be assumed (more efficient)
void* Osal_qmssConvertDescVirtToPhy(void *descAddr)
{
if (!descAddr) return (void *)0;
#ifdef ASSUME_ALL_DESCRIPTOR
if (Cppi_getDescType((Cppi_Desc *)QMSS_DESC_PTR(descAddr)) == Cppi_DescType_HOST)
#endif
{
Cppi_HostDesc *nextBDPtr = (Cppi_HostDesc *)QMSS_DESC_PTR(descAddr);
Cppi_HostDesc *prevBDPtr = 0;
while (nextBDPtr)
{
void *buffPtr=NULL;
if (nextBDPtr->buffPtr)
{
/* keep the virtual buffer address for the write-back below */
buffPtr = (void *)nextBDPtr->buffPtr;
nextBDPtr->buffPtr = (uint32_t)_Osal_qmssVirtToPhy((void *)(nextBDPtr->buffPtr));
if (!(nextBDPtr->buffPtr)) return (void *)0;
/* flush the payload so the hardware reads current data */
Osal_writeBackCache(buffPtr, nextBDPtr->buffLen);
}
if (nextBDPtr->origBuffPtr)
{
nextBDPtr->origBuffPtr = (uint32_t)_Osal_qmssVirtToPhy((void *)(nextBDPtr->origBuffPtr));
if (!(nextBDPtr->origBuffPtr)) return (void *)0;
}
/* advance via the still-virtual link first, then patch the link
 * in the previous descriptor to its physical form */
prevBDPtr = nextBDPtr;
nextBDPtr = (Cppi_HostDesc *)QMSS_DESC_PTR((nextBDPtr->nextBDPtr));
if (prevBDPtr->nextBDPtr)
{
prevBDPtr->nextBDPtr = (uint32_t)_Osal_qmssVirtToPhy((void *)(prevBDPtr->nextBDPtr));
if (!(prevBDPtr->nextBDPtr)) return (void *)0;
}
/* flush the (now fully translated) descriptor itself */
Osal_writeBackCache(prevBDPtr, TUNE_NETAPI_DESC_SIZE);
}
descAddr = _Osal_qmssVirtToPhy(descAddr);
if (!descAddr) return (void *)0;
}
#ifdef ASSUME_ALL_DESCRIPTOR
else if (Cppi_getDescType((Cppi_Desc *)QMSS_DESC_PTR(descAddr)) == Cppi_DescType_MONOLITHIC)
{
/* monolithic descriptor: payload is inline, so only the descriptor
 * needs flushing and translating */
Osal_writeBackCache(descAddr, TUNE_NETAPI_DESC_SIZE);
descAddr = _Osal_qmssVirtToPhy(descAddr);
if (!descAddr) return (void *)0;
}
#endif
return descAddr;
}
/******************************************************************************
 * Function to traverse a CPPI descriptor and convert all address references
 * from physical to virtual.
 *
 * Inverse of Osal_qmssConvertDescVirtToPhy: translates the descriptor
 * address first, then walks the host-descriptor chain converting buffPtr,
 * origBuffPtr and nextBDPtr back to user virtual addresses, invalidating
 * each descriptor/buffer so the CPU reads fresh data. Returns the virtual
 * address of the first descriptor, or 0 (NULL) on any failed translation
 * (NOTE(review): a mid-chain failure leaves the chain half-converted).
 ******************************************************************************/
void* Osal_qmssConvertDescPhyToVirt(void *descAddr)
{
if (!descAddr) return (void *)0;
/* translate the descriptor pointer itself before dereferencing it */
descAddr = _Osal_qmssPhyToVirt(descAddr);
#ifdef ASSUME_ALL_DESCRIPTOR
if (Cppi_getDescType((Cppi_Desc *)QMSS_DESC_PTR(descAddr)) == Cppi_DescType_HOST)
#endif
{
Cppi_HostDesc *nextBDPtr = (Cppi_HostDesc *)QMSS_DESC_PTR(descAddr);
while (nextBDPtr)
{
/* refresh the descriptor contents before reading its fields */
Osal_invalidateCache(nextBDPtr, TUNE_NETAPI_DESC_SIZE);
if (nextBDPtr->buffPtr)
{
nextBDPtr->buffPtr = (uint32_t)_Osal_qmssPhyToVirt((void *)(nextBDPtr->buffPtr));
if (!(nextBDPtr->buffPtr)) return (void *)0;
/* refresh the payload now reachable at its virtual address */
Osal_invalidateCache((void *)(nextBDPtr->buffPtr), nextBDPtr->buffLen);
}
if (nextBDPtr->origBuffPtr)
{
nextBDPtr->origBuffPtr = (uint32_t)_Osal_qmssPhyToVirt((void *)(nextBDPtr->origBuffPtr));
if (!(nextBDPtr->origBuffPtr)) return (void *)0;
}
if (nextBDPtr->nextBDPtr)
{
nextBDPtr->nextBDPtr = (uint32_t)_Osal_qmssPhyToVirt((void *)(nextBDPtr->nextBDPtr));
if (!(nextBDPtr->nextBDPtr)) return (void *)0;
}
/* link is already virtual here, so it can be followed directly */
nextBDPtr = (void *)QMSS_DESC_PTR((nextBDPtr->nextBDPtr));
}
}
#ifdef ASSUME_ALL_DESCRIPTOR
else if (Cppi_getDescType((Cppi_Desc *)QMSS_DESC_PTR(descAddr)) == Cppi_DescType_MONOLITHIC)
{
/* monolithic descriptor: translate and refresh the descriptor only */
descAddr = _Osal_qmssPhyToVirt(descAddr);
if (!descAddr) return (void *)0;
Osal_invalidateCache(descAddr, TUNE_NETAPI_DESC_SIZE);
}
#endif
return descAddr;
}
/* Stub critical-section enter used where no protection is required.
 * FIX: the original had no return statement in a non-void function —
 * undefined behavior if the caller uses the result. Return NULL
 * explicitly so the matching Osal_stubCsExit(NULL) is well-defined. */
void* Osal_stubCsEnter (void)
{
    return NULL;
}
/* Stub critical-section exit: no semaphore was taken, so there is
 * nothing to release. */
void Osal_stubCsExit (void *CsHandle)
{
    (void)CsHandle;
}