-/******************************************************************************\r
- * FILE PURPOSE: Functions to OSAL related routines for running NWAL, PA, QMSS,etc\r
- ******************************************************************************\r
- * FILE NAME: osal.c\r
- *\r
- * DESCRIPTION: Functions to initialize framework resources for running NWAL\r
- *\r
- * REVISION HISTORY:\r
- *\r
- * Copyright (c) Texas Instruments Incorporated 2010-2011\r
- * \r
- * Redistribution and use in source and binary forms, with or without \r
- * modification, are permitted provided that the following conditions \r
- * are met:\r
- *\r
- * Redistributions of source code must retain the above copyright \r
- * notice, this list of conditions and the following disclaimer.\r
- *\r
- * Redistributions in binary form must reproduce the above copyright\r
- * notice, this list of conditions and the following disclaimer in the \r
- * documentation and/or other materials provided with the \r
- * distribution.\r
- *\r
- * Neither the name of Texas Instruments Incorporated nor the names of\r
- * its contributors may be used to endorse or promote products derived\r
- * from this software without specific prior written permission.\r
- *\r
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \r
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT \r
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT \r
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, \r
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT \r
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \r
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \r
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
- *\r
- */\r
-\r
-/* CSL RL includes */\r
-#include <ti/csl/cslr_device.h>\r
-#include <ti/csl/csl_pscAux.h>\r
-#include <ti/csl/csl_semAux.h>\r
-#include <ti/csl/csl_cacheAux.h>\r
-#include <ti/csl/csl_xmcAux.h>\r
-\r
-#include <stdlib.h>\r
-#include <stdio.h>\r
-\r
-#include "netapi_vm.h"\r
-#include "netapi_timer.h"\r
-#define System_printf printf\r
-\r
-uint32_t Osal_qmss_MallocCounter =0;\r
-uint32_t Osal_qmss_FreeCounter =0;\r
-uint32_t Osal_cppi_MallocCounter =0;\r
-uint32_t Osal_cppi_FreeCounter =0;\r
-\r
-\r
-\r
-/* TODO: */\r
-#define DNUM 0\r
-\r
-#if 0\r
-uint32_t globalCritkey;\r
-\r
-/* Lock to be used for critical section */\r
-pthread_mutex_t mutex_lock;\r
-\r
-void nwalTest_osalInit() \r
-{\r
- pthread_mutex_init(&mutex_lock, NULL);\r
- return;\r
-}\r
-\r
-void nwalTest_osalshutdown() \r
-{\r
- pthread_mutex_destroy(&mutex_lock);\r
- return;\r
-}\r
-\r
-static inline void nwalTest_osalEnterCS()\r
-{\r
-#if 0\r
- pthread_mutex_lock(&mutex_lock);\r
-#endif \r
- return;\r
-}\r
-\r
-static inline void nwalTest_osalLeaveCS()\r
-{\r
-\r
-#if 0\r
- pthread_mutex_unlock(&mutex_lock);\r
-#endif\r
- return;\r
-}\r
-\r
-#endif\r
-\r
-/*****************************************************************************\r
- * FUNCTION PURPOSE: Cache Invalidation Routine\r
- ***************************************************************************** \r
- * DESCRIPTION: Cache Invalidation Routine\r
- *****************************************************************************/\r
-void Osal_invalidateCache (void *blockPtr, uint32_t size) \r
-{\r
- /* Stub Function. TBD: Would need to handle when cache is enabled for ARM */\r
- return;\r
-}\r
-\r
-/*****************************************************************************\r
- * FUNCTION PURPOSE: Cache Writeback Routine\r
- ***************************************************************************** \r
- * DESCRIPTION: Cache Invalidation Routine\r
- *****************************************************************************/\r
-void Osal_writeBackCache (void *blockPtr, uint32_t size) \r
-{\r
- /* Stub Function. TBD: Would need to handle when cache is enabled for ARM */\r
- return;\r
-}\r
-\r
-\r
-void * Osal_qmssMtCsEnter()\r
-{\r
- /* Stub Function. TBD: Would need to handle when for multi proc access \r
- * To be handled once infrastructure is available from Kernel\r
- */\r
- return NULL;\r
-}\r
-\r
-\r
-void Osal_qmssMtCsExit(void *key)\r
-{\r
- /* Stub Function. TBD: Would need to handle when for multi proc access \r
- * To be handled once infrastructure is available from Kernel\r
- */\r
- return;\r
-}\r
-\r
-void Osal_nwalCsEnter(uint32_t *key)\r
-{\r
- /* Stub Function. TBD: Would need to handle when for multi proc access \r
- * To be handled once infrastructure is available from Kernel\r
- */\r
- return;\r
-}\r
-\r
-void Osal_nwalCsExit(uint32_t key)\r
-{\r
- /* Stub Function. TBD: Would need to handle when for multi proc access \r
- * To be handled once infrastructure is available from Kernel\r
- */\r
- return;\r
-}\r
-\r
-\r
-void Osal_qmssLog ( String fmt, ... )\r
-{\r
-}\r
-\r
-\r
-void Osal_cppiCsEnter (uint32_t *key)\r
-{ \r
-\r
- /* Stub Function. TBD: Would need to handle when for multi proc access \r
- * To be handled once infrastructure is available from Kernel\r
- */\r
- return;\r
-}\r
-\r
-void Osal_cppiCsExit (uint32_t key)\r
-{\r
-\r
- /* Stub Function. TBD: Would need to handle when for multi proc access \r
- * To be handled once infrastructure is available from Kernel\r
- */\r
- return;\r
-}\r
-\r
-void Osal_cppiLog ( String fmt, ... )\r
-{\r
-}\r
-\r
-void Osal_paBeginMemAccess (Ptr addr, uint32_t size)\r
-{\r
- /* Stub Function. TBD: Would need to handle when for multi proc access \r
- * To be handled once infrastructure is available from Kernel\r
- */\r
-\r
-}\r
-\r
-void Osal_paEndMemAccess (Ptr addr, uint32_t size)\r
-{ \r
- /* Stub Function. TBD: Would need to handle when for multi proc access \r
- * To be handled once infrastructure is available from Kernel\r
- */\r
-}\r
-\r
-void* Osal_qmssCsEnter ()\r
-{\r
- \r
- /* Stub Function. TBD: Would need to handle when for multi proc access \r
- * To be handled once infrastructure is available from Kernel\r
- */\r
- return(NULL);\r
-}\r
-\r
-void Osal_qmssCsExit (void * key)\r
-{\r
- /* Stub Function. TBD: Would need to handle when for multi proc access \r
- * To be handled once infrastructure is available from Kernel\r
- */\r
- return;\r
-}\r
-\r
-Ptr Osal_qmssMalloc (uint32_t num_bytes)\r
-{\r
- Ptr ret;\r
- \r
- Osal_qmss_MallocCounter++;\r
- ret = malloc (num_bytes); \r
- if(ret==NULL)\r
- {\r
- System_printf("\nERROR! QMSS Malloc failed!\n");\r
- } \r
- \r
- return ret;\r
-}\r
-\r
-void Osal_qmssFree (Ptr ptr, uint32_t size)\r
-{\r
- /* Increment the free counter. */\r
- Osal_qmss_FreeCounter++; \r
- free(ptr);\r
-}\r
-\r
-Ptr Osal_cppiMalloc (uint32_t num_bytes)\r
-{\r
- Ptr ret;\r
- \r
- Osal_cppi_MallocCounter++;\r
- num_bytes += (CACHE_L2_LINESIZE-1);\r
- ret = malloc (num_bytes);\r
-\r
- if(ret==NULL)\r
- {\r
- System_printf("\nERROR! CPPI Malloc failed!\n");\r
- } \r
- \r
- return ret;\r
-}\r
-\r
-void Osal_cppiFree (Ptr ptr, uint32_t size)\r
-{\r
- /* Increment the free counter. */\r
- Osal_cppi_FreeCounter++; \r
- free(ptr); \r
-}\r
-\r
-void Osal_qmssBeginMemAccess (void *blockPtr, uint32_t size)\r
-{\r
- Osal_invalidateCache(blockPtr,size);\r
- return;\r
-}\r
-\r
-void Osal_qmssEndMemAccess (void *blockPtr, uint32_t size)\r
-{\r
- Osal_writeBackCache(blockPtr,size);\r
- return;\r
-}\r
-\r
-void Osal_cppiBeginMemAccess (void *blockPtr, uint32_t size)\r
-{\r
- Osal_invalidateCache(blockPtr,size);\r
- return;\r
-}\r
-\r
-void Osal_cppiEndMemAccess (void *blockPtr, uint32_t size)\r
-{\r
- Osal_writeBackCache(blockPtr,size);\r
- return;\r
-}\r
-\r
-void Osal_nwalInvalidateCache (void *blockPtr, uint32_t size)\r
-{\r
- Osal_invalidateCache(blockPtr,size);\r
- return;\r
-}\r
-\r
-void Osal_nwalWriteBackCache (void *blockPtr, uint32_t size)\r
-{\r
- Osal_writeBackCache(blockPtr,size);\r
- return;\r
-}\r
-\r
-uint32_t Osal_nwalGetCacheLineSize (void )\r
-{\r
- /* By default assumes L2 cache line is enabled. If not return CACHE_L1D_LINESIZE */\r
- return (CACHE_L2_LINESIZE);\r
-}\r
-\r
-/********************************************************************\r
- * FUNCTION PURPOSE: Convert local address to global\r
- ********************************************************************\r
- * DESCRIPTION: Returns global address\r
- ********************************************************************/\r
-\r
-unsigned int Osal_nwalLocToGlobAddr(unsigned int x)\r
-{\r
- return x;\r
-}\r
-\r
-uint16_t Osal_nwalGetProcId (void )\r
-{\r
- return DNUM;\r
-}\r
-uint64_t Osal_nwalGetTimeStamp(void)\r
-{\r
- /* Stub function to return timestamp\r
- */\r
- return netapi_getTimestamp();\r
-}\r
-#ifdef NWAL_ENABLE_SA \r
-void Osal_saCsEnter (uint32_t *key)\r
-{\r
- /* Stub Function. TBD: Would need to handle when for multi proc access \r
- * To be handled once infrastructure is available from Kernel\r
- */ \r
- ((CSL_semAcquireDirect (SA_HW_SEM)) == 0); \r
- return;\r
-}\r
-\r
-void Osal_saCsExit (uint32_t key)\r
-{\r
- /* Stub Function. TBD: Would need to handle when for multi proc access \r
- * To be handled once infrastructure is available from Kernel\r
- */ \r
- return;\r
-}\r
-\r
-\r
-void Osal_saMtCsEnter (uint32_t *key)\r
-{\r
- /* Stub Function. TBD: Would need to handle when for multi proc access \r
- * To be handled once infrastructure is available from Kernel\r
- */ \r
- return;\r
-}\r
-\r
-void Osal_saMtCsExit (uint32_t key)\r
-{\r
- /* Stub Function. TBD: Would need to handle when for multi proc access \r
- * To be handled once infrastructure is available from Kernel\r
- */ \r
- return;\r
-}\r
-\r
-void Osal_saBeginMemAccess (void *blockPtr, uint32_t size)\r
-{\r
- Osal_invalidateCache(blockPtr,size);\r
- return;\r
-}\r
-\r
-void Osal_saEndMemAccess (void *blockPtr, uint32_t size)\r
-{\r
- Osal_writeBackCache(blockPtr,size);\r
- return;\r
-}\r
-\r
-\r
-\r
-void Osal_saBeginScAccess (void* addr, uint32_t size)\r
-{\r
- Osal_invalidateCache(addr,size); \r
- \r
-}\r
- \r
-void Osal_saEndScAccess (void* addr, uint32_t size)\r
-{\r
- Osal_writeBackCache(addr,size);\r
- \r
-}\r
-\r
-#endif\r
-\r
-void Osal_pktLibBeginMemAccess(void* ptr, uint32_t size)\r
-{\r
- Osal_invalidateCache(ptr,size);\r
-}\r
-\r
-\r
-void Osal_pktLibEndMemAccess(void* ptr, uint32_t size)\r
-{\r
- Osal_writeBackCache(ptr,size);\r
-}\r
-\r
-\r
-void Osal_pktLibBeginPktAccess(Pktlib_HeapHandle heapHandle, Ti_Pkt* ptrPkt, uint32_t size)\r
-{\r
- /* TODO: We should use the 'heapHandle' and compare it with what we got from the\r
- * 'create/find' HEAP API & depending upon the comparison take appropriate action. \r
- * Just for testing we are always invalidating the cache here. */\r
-\r
- Osal_invalidateCache(ptrPkt,size);\r
-}\r
-\r
-\r
-void Osal_pktLibEndPktAccess(Pktlib_HeapHandle heapHandle, Ti_Pkt* ptrPkt, uint32_t size)\r
-{\r
- /* TODO: We should use the 'heapHandle' and compare it with what we got from the\r
- * 'create/find' HEAP API & depending upon the comparison take appropriate action. \r
- * Just for testing we are always writing back the cache here. */\r
-\r
- /* Writeback the contents of the cache. */\r
- Osal_writeBackCache(ptrPkt,size);\r
-}\r
-\r
-\r
-void* Osal_pktLibEnterCriticalSection(Pktlib_HeapHandle heapHandle)\r
-{\r
- /* TODO: We should use the 'heapHandle' and compare it with what we got from the\r
- * 'create/find' HEAP API & depending upon the comparison take appropriate action. \r
- * Implementations here could range from a MULTI-THREAD protection if the packets in \r
- * the heap are being accessed across multiple threads or MULTI-CORE if the packets\r
- * are being accessed across multiple cores and features: split and clone are used.\r
- * For NWAL layer no protection required.\r
- *\r
- * For testing we are not doing any of this so we are simply setting it to NOOP */\r
- return NULL;\r
-}\r
-\r
-\r
-void Osal_pktLibExitCriticalSection(Pktlib_HeapHandle heapHandle, void* csHandle)\r
-{\r
- /* TODO: We should use the 'heapHandle' and compare it with what we got from the\r
- * 'create/find' HEAP API & depending upon the comparison take appropriate action. \r
- * Implementations here could range from a MULTI-THREAD protection if the packets in \r
- * the heap are being accessed across multiple threads or MULTI-CORE if the packets\r
- * are being accessed across multiple cores and features: split and clone are used.\r
- * For NWAL layer no protection required.. \r
- *\r
- * For testing we are not doing any of this so we are simply setting it to NOOP */\r
- return; \r
-}\r
-\r
-void* Osal_qmssVirtToPhy (void *ptr)\r
-{\r
- return (void *)(netapi_VM_mem_start_phy + ((uint8_t*)ptr - netapi_VM_mem_start));\r
-}\r
-\r
-void* Osal_qmssPhyToVirt (void *ptr)\r
-{\r
- if (!ptr) return (void *) 0;\r
- //todo, see if out of range of mem_start_phy and size!! (like mmu would do)\r
- return (void *)(netapi_VM_mem_start + ((uint8_t*)ptr - netapi_VM_mem_start_phy));\r
-}\r
-\r
-/******************************************************************************\r
-* Function to traverse a CPPI descriptor and convert all address references\r
-* from virtual to physical.\r
-******************************************************************************/\r
-void* Osal_qmssConvertDescVirtToPhy(void *descAddr)\r
-{\r
- if (!descAddr) return (void *)0;\r
-\r
- if (Cppi_getDescType((Cppi_Desc *)QMSS_DESC_PTR(descAddr)) == Cppi_DescType_HOST)\r
- {\r
- Cppi_HostDesc *nextBDPtr = (Cppi_HostDesc *)QMSS_DESC_PTR(descAddr);\r
- Cppi_HostDesc *prevBDPtr = 0;\r
- while (nextBDPtr)\r
- {\r
- void *buffPtr;\r
- if (nextBDPtr->buffPtr)\r
- {\r
- buffPtr = (void *)nextBDPtr->buffPtr;\r
- nextBDPtr->buffPtr = (uint32_t)Osal_qmssVirtToPhy((void *)(nextBDPtr->buffPtr));\r
- if (!(nextBDPtr->buffPtr)) return (void *)0;\r
- }\r
-\r
- if (nextBDPtr->origBuffPtr)\r
- {\r
- nextBDPtr->origBuffPtr = (uint32_t)Osal_qmssVirtToPhy((void *)(nextBDPtr->origBuffPtr));\r
- if (!(nextBDPtr->origBuffPtr)) return (void *)0;\r
- }\r
-\r
- prevBDPtr = nextBDPtr;\r
- nextBDPtr = (Cppi_HostDesc *)QMSS_DESC_PTR((nextBDPtr->nextBDPtr));\r
- if (prevBDPtr->nextBDPtr)\r
- {\r
- prevBDPtr->nextBDPtr = (uint32_t)Osal_qmssVirtToPhy((void *)(prevBDPtr->nextBDPtr));\r
- if (!(prevBDPtr->nextBDPtr)) return (void *)0;\r
- }\r
-\r
- Qmss_osalEndMemAccess(buffPtr, prevBDPtr->buffLen);\r
- Qmss_osalEndMemAccess(prevBDPtr, sizeof(Cppi_HostDesc));\r
- }\r
- descAddr = Osal_qmssVirtToPhy(descAddr);\r
- if (!descAddr) return (void *)0;\r
- }\r
- else if (Cppi_getDescType((Cppi_Desc *)QMSS_DESC_PTR(descAddr)) == Cppi_DescType_MONOLITHIC)\r
- {\r
- descAddr = Osal_qmssVirtToPhy(descAddr);\r
- if (!descAddr) return (void *)0;\r
- }\r
- return descAddr;\r
-\r
-}\r
-\r
-\r
-/******************************************************************************\r
-* Function to traverse a CPPI descriptor and convert all address references\r
-* from physical to virtual.\r
-******************************************************************************/\r
-void* Osal_qmssConvertDescPhyToVirt(void *descAddr)\r
-{\r
- if (!descAddr) return (void *)0;\r
- descAddr = Osal_qmssPhyToVirt(descAddr);\r
-\r
- if (Cppi_getDescType((Cppi_Desc *)QMSS_DESC_PTR(descAddr)) == Cppi_DescType_HOST)\r
- {\r
- Cppi_HostDesc *nextBDPtr = (Cppi_HostDesc *)QMSS_DESC_PTR(descAddr);\r
- while (nextBDPtr)\r
- {\r
- Qmss_osalBeginMemAccess(nextBDPtr, sizeof(Cppi_HostDesc));\r
- if (nextBDPtr->buffPtr)\r
- {\r
- nextBDPtr->buffPtr = (uint32_t)Osal_qmssPhyToVirt((void *)(nextBDPtr->buffPtr));\r
- if (!(nextBDPtr->buffPtr)) return (void *)0;\r
- }\r
-\r
- if (nextBDPtr->origBuffPtr)\r
- {\r
- nextBDPtr->origBuffPtr = (uint32_t)Osal_qmssPhyToVirt((void *)(nextBDPtr->origBuffPtr));\r
- if (!(nextBDPtr->origBuffPtr)) return (void *)0;\r
- }\r
-\r
- if (nextBDPtr->nextBDPtr)\r
- {\r
- nextBDPtr->nextBDPtr = (uint32_t)Osal_qmssPhyToVirt((void *)(nextBDPtr->nextBDPtr));\r
- if (!(nextBDPtr->nextBDPtr)) return (void *)0;\r
- }\r
-\r
- Qmss_osalBeginMemAccess((void *)(nextBDPtr->buffPtr), nextBDPtr->buffLen);\r
- nextBDPtr = (void *)QMSS_DESC_PTR((nextBDPtr->nextBDPtr));\r
- }\r
- }\r
- else if (Cppi_getDescType((Cppi_Desc *)QMSS_DESC_PTR(descAddr)) == Cppi_DescType_MONOLITHIC)\r
- {\r
- descAddr = Osal_qmssPhyToVirt(descAddr);\r
- if (!descAddr) return (void *)0;\r
- }\r
- return descAddr;\r
-}\r
-\r
-void* Osal_stubCsEnter (void)\r
-{\r
-\r
-\r
-}\r
-void Osal_stubCsExit (void *CsHandle)\r
-{\r
- /* Release Semaphore using handle */\r
-\r
-\r
- return;\r
-}\r
-\r
+/******************************************************************************
+ * FILE PURPOSE: Functions to OSAL related routines for running NWAL, PA, QMSS,etc
+ ******************************************************************************
+ * FILE NAME: osal.c
+ *
+ * DESCRIPTION: Functions to initialize framework resources for running NWAL
+ *
+ * REVISION HISTORY:
+ *
+ * Copyright (c) Texas Instruments Incorporated 2010-2011
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/* CSL RL includes */
+#include <ti/csl/cslr_device.h>
+#include <ti/csl/csl_pscAux.h>
+#include <ti/csl/csl_semAux.h>
+#include <ti/csl/csl_cacheAux.h>
+#include <ti/csl/csl_xmcAux.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "netapi_tune.h"
+#include "netapi_vm.h"
+#include "netapi_timer.h"
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <string.h>
+#include "netapi_util.h"
+#include "tools/module/netapimod.h"
+#include <ti/drv/sa/sa_osal.h>
+#define System_printf printf
+
+/* Alloc/free balance counters: each Osal_*Malloc increments its Malloc
+ * counter and each Osal_*Free increments its Free counter, so a leak shows
+ * up as MallocCounter != FreeCounter at shutdown. */
+uint32_t Osal_qmss_MallocCounter =0;
+uint32_t Osal_qmss_FreeCounter =0;
+uint32_t Osal_cppi_MallocCounter =0;
+uint32_t Osal_cppi_FreeCounter =0;
+
+/* Forward declaration; definition appears later in this file. */
+void* Osal_saGetSCPhyAddr(void* vaddr);
+
+
+
+/* TODO: */
+/* Core/processor number; hard-coded to 0 for single-core ARM user space. */
+#define DNUM 0
+
+#if 0
+uint32_t globalCritkey;
+
+/* Lock to be used for critical section */
+pthread_mutex_t mutex_lock;
+
+void nwalTest_osalInit()
+{
+ pthread_mutex_init(&mutex_lock, NULL);
+ return;
+}
+
+void nwalTest_osalshutdown()
+{
+ pthread_mutex_destroy(&mutex_lock);
+ return;
+}
+
+static inline void nwalTest_osalEnterCS()
+{
+#if 0
+ pthread_mutex_lock(&mutex_lock);
+#endif
+ return;
+}
+
+static inline void nwalTest_osalLeaveCS()
+{
+
+#if 0
+ pthread_mutex_unlock(&mutex_lock);
+#endif
+ return;
+}
+
+#endif
+
+
+
+/**********USE SPACE ACCESS TO KERNEL MEMORY SERVICES*************/
+/* File descriptor for the netapi kernel module; set by netapi_utilModInit()
+ * and used by every ioctl/mmap helper below. */
+static int netapi_fd;
+
+/***init **/
+/* Open the netapi kernel module device.
+ * Returns the open file descriptor on success, -1 on failure. */
+int netapi_utilModInit(void)
+{
+    netapi_fd = open("/dev/netapi", O_RDWR);
+
+    if (netapi_fd == -1) {
+        return -1;
+    }
+    return netapi_fd;
+}
+
+/***close **/
+/* Close the netapi kernel module device.
+ * NOTE(review): netapi_fd is not reset to -1 here, so a later ioctl after
+ * close would use a stale descriptor — confirm callers never do that. */
+void netapi_utilModClose(void)
+{
+    close(netapi_fd);
+}
+
+/* return physical address of region kernel module has allocated for us */
+/* Returns 0 if the ioctl fails (0 is assumed not to be a valid phys addr). */
+unsigned long netapi_utilGetPhysOfBufferArea(void)
+{
+    unsigned long physp;
+
+    if (ioctl(netapi_fd, NETAPIMOD_IOCGETPHYS | NETAPIMOD_IOCMAGIC, &physp) == -1) {
+        return 0;
+    }
+    return physp;
+}
+
+/* return the size of that region */
+/* Returns 0 if the ioctl fails. */
+unsigned long netapi_utilGetSizeOfBufferArea(void)
+{
+    unsigned long size;
+
+    if (ioctl(netapi_fd, NETAPIMOD_IOCGETSIZE | NETAPIMOD_IOCMAGIC, &size) == -1) {
+        return 0;
+    }
+    return size;
+}
+
+//*****for the actual wb, inv cache ops, call the osal_xxx version, not these directly
+// (so make inline)
+/** write back operation on block: delegates the cache maintenance to the
+ *  kernel module via ioctl, since user space cannot issue cache ops directly.
+ *  Returns 0 on success, -1 on ioctl failure. */
+static inline int _netapi_utilCacheWb(void *ptr, size_t size)
+{
+    struct netapimod_block block;
+
+    block.addr = (unsigned long)ptr;
+    block.size = size;
+
+    if (ioctl(netapi_fd, NETAPIMOD_IOCCACHEWB | NETAPIMOD_IOCMAGIC, &block) == -1) {
+        return -1;
+    }
+    return 0;
+}
+/* Non-inline public wrapper so external callers can link against it. */
+int netapi_utilCacheWb(void *ptr, size_t size) {return _netapi_utilCacheWb(ptr,size);}
+
+/** write back & invalidate **/
+static inline int _netapi_utilCacheWbInv(void *ptr, size_t size)
+{
+    struct netapimod_block block;
+
+    block.addr = (unsigned long)ptr;
+    block.size = size;
+
+    if (ioctl(netapi_fd, NETAPIMOD_IOCCACHEWBINV | NETAPIMOD_IOCMAGIC, &block) == -1) {
+        return -1;
+    }
+    return 0;
+}
+
+int netapi_utilCacheWbInv(void *ptr, size_t size) {return _netapi_utilCacheWbInv(ptr,size);}
+/** just invalidate **/
+static inline int _netapi_utilCacheInv(void *ptr, size_t size)
+{
+    struct netapimod_block block;
+
+    block.addr = (unsigned long)ptr;
+    block.size = size;
+
+    if (ioctl(netapi_fd, NETAPIMOD_IOCCACHEINV | NETAPIMOD_IOCMAGIC, &block) == -1) {
+        return -1;
+    }
+    return 0;
+}
+int netapi_utilCacheInv(void *ptr, size_t size) {return _netapi_utilCacheInv(ptr,size);}
+
+//***mmap the block into our user space process memory map. */
+/* Returns the user-space virtual address, or 0 on failure.
+ * NOTE(review): mmap requires 'offset' to be a multiple of the page size;
+ * this is not checked here — presumably the kernel module guarantees it.
+ * Confirm against the module's implementation. */
+unsigned long netapi_utilGetVaOfBufferArea(unsigned int offset, unsigned int size)
+{
+    void *userp;
+
+    /* Map the physical address to user space */
+    userp = mmap(0,                       // Preferred start address
+                 size,                    // Length to be mapped
+                 PROT_WRITE | PROT_READ,  // Read and write access
+                 MAP_SHARED,              // Shared memory
+                 netapi_fd,               // File descriptor
+                 offset);                 // The byte offset from fd
+
+    if (userp == MAP_FAILED) {
+        return 0;
+    }
+    return (unsigned long)userp;
+}
+
+/* Accumulated cycle count and number of samples for cache-op profiling;
+ * updated only when NETAPI_TUNE_USE_CACHE_OPS is defined. */
+static unsigned int cache_op_cycles=0;
+static unsigned int n_cache_op_cycles=0;
+void Osal_cache_op_measure_reset(void) { cache_op_cycles=0; n_cache_op_cycles=0;}
+/*****************************************************************************
+ * FUNCTION PURPOSE: Cache Invalidation Routine
+ *****************************************************************************
+ * DESCRIPTION: Invalidates the block via the kernel module if cache ops are
+ *              enabled (NETAPI_TUNE_USE_CACHE_OPS); otherwise compiles to a
+ *              no-op. Blocks outside [netapi_VM_mem_start, netapi_VM_mem_end]
+ *              are ignored.
+ *              NOTE(review): the range guard tests only the start address,
+ *              not start+size — confirm a block can never straddle the end
+ *              of the VM region.
+ *****************************************************************************/
+static inline void Osal_invalidateCache (void *blockPtr, uint32_t size)
+{
+#ifdef NETAPI_TUNE_USE_CACHE_OPS
+    register unsigned int v1;
+    register unsigned int v2;
+
+    v1= netapi_timing_stop();
+    /* out-of-range blocks return early and are not counted in the stats */
+    if (((uint8_t*)blockPtr <netapi_VM_mem_start)||( (uint8_t*)blockPtr>netapi_VM_mem_end)) return;
+    _netapi_utilCacheInv(blockPtr, size);
+    v2= netapi_timing_stop();
+    cache_op_cycles += (v2-v1);
+    n_cache_op_cycles+=1;
+#endif
+
+    return;
+}
+
+/*****************************************************************************
+ * FUNCTION PURPOSE: Cache Writeback Routine
+ *****************************************************************************
+ * DESCRIPTION: Cache Writeback Routine (actually performs writeback AND
+ *              invalidate via _netapi_utilCacheWbInv). Same gating and range
+ *              guard as Osal_invalidateCache above.
+ *****************************************************************************/
+/* stats: returns accumulated cycles; sample count is stored through p_n */
+
+unsigned int Osal_cache_op_measure(int * p_n) { *p_n = n_cache_op_cycles; return cache_op_cycles;}
+
+static inline void Osal_writeBackCache (void *blockPtr, uint32_t size)
+{
+#ifdef NETAPI_TUNE_USE_CACHE_OPS
+    register unsigned int v1;
+    register unsigned int v2;
+
+    v1= netapi_timing_stop();
+    if (((uint8_t*)blockPtr <netapi_VM_mem_start)||( (uint8_t*)blockPtr>netapi_VM_mem_end)) return;
+    _netapi_utilCacheWbInv(blockPtr, size);
+    v2= netapi_timing_stop();
+    cache_op_cycles += (v2-v1);
+    n_cache_op_cycles+=1;
+#endif
+    return;
+}
+
+
+/* Critical-section and logging callouts required by the QMSS/CPPI/NWAL/PA
+ * LLDs. All are intentional no-ops: single-process, single-writer use is
+ * assumed until kernel-side locking infrastructure exists (see TBD notes). */
+void * Osal_qmssMtCsEnter()
+{
+    /* Stub Function. TBD: Would need to handle when for multi proc access
+     * To be handled once infrastructure is available from Kernel
+     */
+    return NULL;
+}
+
+
+void Osal_qmssMtCsExit(void *key)
+{
+    /* Stub Function. TBD: Would need to handle when for multi proc access
+     * To be handled once infrastructure is available from Kernel
+     */
+    return;
+}
+
+void Osal_nwalCsEnter(uint32_t *key)
+{
+    /* Stub Function. TBD: Would need to handle when for multi proc access
+     * To be handled once infrastructure is available from Kernel
+     */
+    return;
+}
+
+void Osal_nwalCsExit(uint32_t key)
+{
+    /* Stub Function. TBD: Would need to handle when for multi proc access
+     * To be handled once infrastructure is available from Kernel
+     */
+    return;
+}
+
+
+/* QMSS logging callout — logging disabled (empty body). */
+void Osal_qmssLog ( String fmt, ... )
+{
+}
+
+
+void Osal_cppiCsEnter (uint32_t *key)
+{
+
+    /* Stub Function. TBD: Would need to handle when for multi proc access
+     * To be handled once infrastructure is available from Kernel
+     */
+    return;
+}
+
+void Osal_cppiCsExit (uint32_t key)
+{
+
+    /* Stub Function. TBD: Would need to handle when for multi proc access
+     * To be handled once infrastructure is available from Kernel
+     */
+    return;
+}
+
+/* CPPI logging callout — logging disabled (empty body). */
+void Osal_cppiLog ( String fmt, ... )
+{
+}
+
+void Osal_paBeginMemAccess (Ptr addr, uint32_t size)
+{
+    /* Stub Function. TBD: Would need to handle when for multi proc access
+     * To be handled once infrastructure is available from Kernel
+     */
+
+}
+
+void Osal_paEndMemAccess (Ptr addr, uint32_t size)
+{
+    /* Stub Function. TBD: Would need to handle when for multi proc access
+     * To be handled once infrastructure is available from Kernel
+     */
+}
+void Osal_paMtCsEnter (uint32_t *key)
+{
+    /* Stub Function. TBD: Would need to handle when for multi proc access
+     * To be handled once infrastructure is available from Kernel
+     */
+}
+void Osal_paMtCsExit (uint32_t key)
+{
+    /* Stub Function. TBD: Would need to handle when for multi proc access
+     * To be handled once infrastructure is available from Kernel
+     */
+}
+
+
+void* Osal_qmssCsEnter ()
+{
+
+    /* Stub Function. TBD: Would need to handle when for multi proc access
+     * To be handled once infrastructure is available from Kernel
+     */
+    return(NULL);
+}
+
+void Osal_qmssCsExit (void * key)
+{
+    /* Stub Function. TBD: Would need to handle when for multi proc access
+     * To be handled once infrastructure is available from Kernel
+     */
+    return;
+}
+
+/* QMSS heap callout: plain malloc plus leak-tracking counter.
+ * Returns NULL (after logging) if the allocation fails. */
+Ptr Osal_qmssMalloc (uint32_t num_bytes)
+{
+    Ptr ret;
+
+    Osal_qmss_MallocCounter++;
+    ret = malloc (num_bytes);
+    if(ret==NULL)
+    {
+        System_printf("\nERROR! QMSS Malloc failed!\n");
+    }
+
+    return ret;
+}
+
+void Osal_qmssFree (Ptr ptr, uint32_t size)
+{
+    /* Increment the free counter. */
+    Osal_qmss_FreeCounter++;
+    free(ptr);
+}
+
+/* CPPI heap callout.
+ * NOTE(review): num_bytes is padded by CACHE_L2_LINESIZE-1, presumably to
+ * leave headroom for cache-line alignment, but the pointer returned by
+ * malloc is NOT actually aligned here — confirm callers do not rely on an
+ * aligned result. */
+Ptr Osal_cppiMalloc (uint32_t num_bytes)
+{
+    Ptr ret;
+
+    Osal_cppi_MallocCounter++;
+    num_bytes += (CACHE_L2_LINESIZE-1);
+    ret = malloc (num_bytes);
+
+    if(ret==NULL)
+    {
+        System_printf("\nERROR! CPPI Malloc failed!\n");
+    }
+
+    return ret;
+}
+
+void Osal_cppiFree (Ptr ptr, uint32_t size)
+{
+    /* Increment the free counter. */
+    Osal_cppi_FreeCounter++;
+    free(ptr);
+}
+
+/* QMSS/CPPI/NWAL memory-access fences. The cache-op calls are deliberately
+ * commented out in this patch — presumably coherence is handled elsewhere
+ * (e.g. at QMSS push/pop time or via non-cached mappings); confirm before
+ * re-enabling. */
+void Osal_qmssBeginMemAccess (void *blockPtr, uint32_t size)
+{
+    //Osal_invalidateCache(blockPtr,size);
+    return;
+}
+
+void Osal_qmssEndMemAccess (void *blockPtr, uint32_t size)
+{
+    //Osal_writeBackCache(blockPtr,size);
+    return;
+}
+
+void Osal_cppiBeginMemAccess (void *blockPtr, uint32_t size)
+{
+    //Osal_invalidateCache(blockPtr,size);
+    return;
+}
+
+void Osal_cppiEndMemAccess (void *blockPtr, uint32_t size)
+{
+    //Osal_writeBackCache(blockPtr,size);
+    return;
+}
+
+void Osal_nwalInvalidateCache (void *blockPtr, uint32_t size)
+{
+    //Osal_invalidateCache(blockPtr,size);
+    return;
+}
+
+void Osal_nwalWriteBackCache (void *blockPtr, uint32_t size)
+{
+    //Osal_writeBackCache(blockPtr,size);
+    return;
+}
+
+uint32_t Osal_nwalGetCacheLineSize (void )
+{
+    /* By default assumes L2 cache line is enabled. If not return CACHE_L1D_LINESIZE */
+    return (CACHE_L2_LINESIZE);
+}
+
+/********************************************************************
+ * FUNCTION PURPOSE: Convert local address to global
+ ********************************************************************
+ * DESCRIPTION: Returns global address (identity mapping on this platform)
+ ********************************************************************/
+
+unsigned int Osal_nwalLocToGlobAddr(unsigned int x)
+{
+    return x;
+}
+
+/* Processor ID callout — always DNUM (0) in this single-core build. */
+uint16_t Osal_nwalGetProcId (void )
+{
+    return DNUM;
+}
+uint64_t Osal_nwalGetTimeStamp(void)
+{
+    /* Stub function to return timestamp
+     */
+    return netapi_getTimestamp();
+}
+
+/* SA (security accelerator) OSAL callouts. */
+uint16_t Osal_saGetProcId (void )
+{
+    return 0;
+}
+
+/* Translate a security-context virtual address to its physical address
+ * using the linear offset of the contiguous VM region (same mapping as
+ * Osal_qmssVirtToPhy). Returns NULL for a NULL input. */
+void* Osal_saGetSCPhyAddr(void* vaddr)
+{
+    if(vaddr == NULL)
+    {
+        return NULL;
+    }
+    return (void *)(netapi_VM_mem_start_phy + ((uint8_t*) vaddr - netapi_VM_mem_start));
+
+}
+
+/* Security-context access fences: these DO perform cache maintenance
+ * (unlike the disabled qmss/cppi fences above), via the kernel module
+ * when NETAPI_TUNE_USE_CACHE_OPS is enabled. */
+void Osal_saBeginScAccess (void* addr, uint32_t size)
+{
+    Osal_invalidateCache(addr,size);
+
+}
+
+void Osal_saEndScAccess (void* addr, uint32_t size)
+{
+    Osal_writeBackCache(addr,size);
+
+}
+
+
+void Osal_saCsEnter (uint32_t *key)
+{
+    /* Stub Function. TBD: Would need to handle when for multi proc access
+     * To be handled once infrastructure is available from Kernel
+     */
+    /* hardware-semaphore acquire disabled in this user-space build: */
+    //((CSL_semAcquireDirect (SA_HW_SEM)) == 0);
+    return;
+}
+
+void Osal_saCsExit (uint32_t key)
+{
+    /* Stub Function. TBD: Would need to handle when for multi proc access
+     * To be handled once infrastructure is available from Kernel
+     */
+    return;
+}
+
+
+void Osal_saMtCsEnter (uint32_t *key)
+{
+    /* Stub Function. TBD: Would need to handle when for multi proc access
+     * To be handled once infrastructure is available from Kernel
+     */
+    return;
+}
+
+void Osal_saMtCsExit (uint32_t key)
+{
+    /* Stub Function. TBD: Would need to handle when for multi proc access
+     * To be handled once infrastructure is available from Kernel
+     */
+    return;
+}
+
+void Osal_saBeginMemAccess (void *blockPtr, uint32_t size)
+{
+    Osal_invalidateCache(blockPtr,size);
+    return;
+}
+
+void Osal_saEndMemAccess (void *blockPtr, uint32_t size)
+{
+    Osal_writeBackCache(blockPtr,size);
+    return;
+}
+
+/* Report system endianness to the SA LLD, chosen at compile time. */
+int Osal_saGetSysEndianMode(void)
+{
+#if defined( _BIG_ENDIAN )
+    return((int)sa_SYS_ENDIAN_MODE_BIG);
+#else
+    return((int)sa_SYS_ENDIAN_MODE_LITTLE);
+#endif
+}
+
+/* Pktlib heap memory fences — disabled (see note on the qmss fences). */
+void Osal_pktLibBeginMemAccess(void* ptr, uint32_t size)
+{
+    //Osal_invalidateCache(ptr,size);
+}
+
+
+void Osal_pktLibEndMemAccess(void* ptr, uint32_t size)
+{
+    //Osal_writeBackCache(ptr,size);
+}
+
+
+/* Invalidate a packet descriptor before the CPU reads it. */
+void Osal_pktLibBeginPktAccess(Pktlib_HeapHandle heapHandle, Ti_Pkt* ptrPkt, uint32_t size)
+{
+    Osal_invalidateCache(ptrPkt,size);
+}
+
+
+void Osal_pktLibEndPktAccess(Pktlib_HeapHandle heapHandle, Ti_Pkt* ptrPkt, uint32_t size)
+{
+
+    /* Cache Write back for the packet. Currently being disabled as it will be done during
+     * QMSS Push operation
+
+    Osal_writeBackCache((void *)ptrPkt,size);
+    */
+}
+
+
+void* Osal_pktLibEnterCriticalSection(Pktlib_HeapHandle heapHandle)
+{
+    /* TODO: We should use the 'heapHandle' and compare it with what we got from the
+     * 'create/find' HEAP API & depending upon the comparison take appropriate action.
+     * Implementations here could range from a MULTI-THREAD protection if the packets in
+     * the heap are being accessed across multiple threads or MULTI-CORE if the packets
+     * are being accessed across multiple cores and features: split and clone are used.
+     * For NWAL layer no protection required.
+     *
+     * For testing we are not doing any of this so we are simply setting it to NOOP */
+    return NULL;
+}
+
+
+void Osal_pktLibExitCriticalSection(Pktlib_HeapHandle heapHandle, void* csHandle)
+{
+    /* TODO: We should use the 'heapHandle' and compare it with what we got from the
+     * 'create/find' HEAP API & depending upon the comparison take appropriate action.
+     * Implementations here could range from a MULTI-THREAD protection if the packets in
+     * the heap are being accessed across multiple threads or MULTI-CORE if the packets
+     * are being accessed across multiple cores and features: split and clone are used.
+     * For NWAL layer no protection required..
+     *
+     * For testing we are not doing any of this so we are simply setting it to NOOP */
+    return;
+}
+
+
/* PktLib physical-to-virtual translation: delegate to the shared QMSS
 * translation helper. */
void* Osal_pktLibPhyToVirt(void *ptr)
{
    return _Osal_qmssPhyToVirt(ptr);
}
+
/* QMSS virtual-to-physical address translation (thin wrapper). */
void* Osal_qmssVirtToPhy (void *ptr)
{
    return (_Osal_qmssVirtToPhy(ptr));
}
+
/* QMSS physical-to-virtual address translation (thin wrapper). */
void * Osal_qmssPhyToVirt (void *ptr)
{
    return (_Osal_qmssPhyToVirt(ptr));
}
+
/******************************************************************************
* Function to traverse a CPPI descriptor and convert all address references
* from virtual to physical.
*
* Walks the host-descriptor chain, remapping buffPtr, origBuffPtr and the
* nextBDPtr links through _Osal_qmssVirtToPhy, and writing each buffer and
* patched descriptor back from cache (presumably so the hardware sees
* coherent memory -- behavior of Osal_writeBackCache defined elsewhere).
*
* Returns the physical address of the first descriptor, or NULL if any
* address in the chain cannot be translated. NOTE(review): on a failed
* translation the chain is left partially converted -- confirm that
* callers treat a NULL return as fatal for the descriptor.
******************************************************************************/
//#define ASSUME_ALL_DESCRIPTOR //define this if mono and host descriptors are present, else don't
                                //define and just host will be assumed (more efficient)
void* Osal_qmssConvertDescVirtToPhy(void *descAddr)
{
    if (!descAddr) return (void *)0;
#ifdef ASSUME_ALL_DESCRIPTOR
    /* Only host descriptors carry a chain of linked buffers needing fixup. */
    if (Cppi_getDescType((Cppi_Desc *)QMSS_DESC_PTR(descAddr)) == Cppi_DescType_HOST)
#endif
    {
        Cppi_HostDesc *nextBDPtr = (Cppi_HostDesc *)QMSS_DESC_PTR(descAddr);
        Cppi_HostDesc *prevBDPtr = 0;
        while (nextBDPtr)
        {
            void *buffPtr = NULL;
            if (nextBDPtr->buffPtr)
            {
                /* Save the virtual buffer address: the cache writeback below
                 * must use the CPU-visible pointer, not the physical one. */
                buffPtr = (void *)nextBDPtr->buffPtr;
                nextBDPtr->buffPtr = (uint32_t)_Osal_qmssVirtToPhy((void *)(nextBDPtr->buffPtr));
                if (!(nextBDPtr->buffPtr)) return (void *)0;
                Osal_writeBackCache(buffPtr, nextBDPtr->buffLen);
            }

            if (nextBDPtr->origBuffPtr)
            {
                nextBDPtr->origBuffPtr = (uint32_t)_Osal_qmssVirtToPhy((void *)(nextBDPtr->origBuffPtr));
                if (!(nextBDPtr->origBuffPtr)) return (void *)0;
            }

            /* Advance via the still-virtual link BEFORE overwriting it with
             * the physical address; order matters here. */
            prevBDPtr = nextBDPtr;
            nextBDPtr = (Cppi_HostDesc *)QMSS_DESC_PTR((nextBDPtr->nextBDPtr));
            if (prevBDPtr->nextBDPtr)
            {
                prevBDPtr->nextBDPtr = (uint32_t)_Osal_qmssVirtToPhy((void *)(prevBDPtr->nextBDPtr));
                if (!(prevBDPtr->nextBDPtr)) return (void *)0;
            }

            /* Flush the fully patched descriptor itself. */
            Osal_writeBackCache(prevBDPtr, TUNE_NETAPI_DESC_SIZE);
        }
        /* Finally translate the head descriptor's own address. */
        descAddr = _Osal_qmssVirtToPhy(descAddr);
        if (!descAddr) return (void *)0;
    }
#ifdef ASSUME_ALL_DESCRIPTOR
    else if (Cppi_getDescType((Cppi_Desc *)QMSS_DESC_PTR(descAddr)) == Cppi_DescType_MONOLITHIC)
    {
        /* Monolithic descriptors have no linked buffers: just write back
         * the descriptor and translate its address. */
        Osal_writeBackCache(descAddr, TUNE_NETAPI_DESC_SIZE);
        descAddr = _Osal_qmssVirtToPhy(descAddr);
        if (!descAddr) return (void *)0;
    }
#endif
    return descAddr;

}
+
+
+/******************************************************************************
+* Function to traverse a CPPI descriptor and convert all address references
+* from physical to virtual.
+******************************************************************************/
+void* Osal_qmssConvertDescPhyToVirt(void *descAddr)
+{
+ if (!descAddr) return (void *)0;
+ descAddr = _Osal_qmssPhyToVirt(descAddr);
+
+#ifdef ASSUME_ALL_DESCRIPTOR
+ if (Cppi_getDescType((Cppi_Desc *)QMSS_DESC_PTR(descAddr)) == Cppi_DescType_HOST)
+#endif
+ {
+ Cppi_HostDesc *nextBDPtr = (Cppi_HostDesc *)QMSS_DESC_PTR(descAddr);
+ while (nextBDPtr)
+ {
+ Osal_invalidateCache(nextBDPtr, TUNE_NETAPI_DESC_SIZE);
+ if (nextBDPtr->buffPtr)
+ {
+ nextBDPtr->buffPtr = (uint32_t)_Osal_qmssPhyToVirt((void *)(nextBDPtr->buffPtr));
+ if (!(nextBDPtr->buffPtr)) return (void *)0;
+ Osal_invalidateCache((void *)(nextBDPtr->buffPtr), nextBDPtr->buffLen);
+ }
+
+ if (nextBDPtr->origBuffPtr)
+ {
+ nextBDPtr->origBuffPtr = (uint32_t)_Osal_qmssPhyToVirt((void *)(nextBDPtr->origBuffPtr));
+ if (!(nextBDPtr->origBuffPtr)) return (void *)0;
+ }
+
+ if (nextBDPtr->nextBDPtr)
+ {
+ nextBDPtr->nextBDPtr = (uint32_t)_Osal_qmssPhyToVirt((void *)(nextBDPtr->nextBDPtr));
+ if (!(nextBDPtr->nextBDPtr)) return (void *)0;
+ }
+
+ nextBDPtr = (void *)QMSS_DESC_PTR((nextBDPtr->nextBDPtr));
+ }
+ }
+#ifdef ASSUME_ALL_DESCRIPTOR
+ else if (Cppi_getDescType((Cppi_Desc *)QMSS_DESC_PTR(descAddr)) == Cppi_DescType_MONOLITHIC)
+ {
+ descAddr = _Osal_qmssPhyToVirt(descAddr);
+ if (!descAddr) return (void *)0;
+ Osal_invalidateCache(descAddr, TUNE_NETAPI_DESC_SIZE);
+ }
+#endif
+ return descAddr;
+}
+
/* Stub critical-section entry: no lock is taken, so there is no handle to
 * return. Returns NULL, matching Osal_pktLibEnterCriticalSection.
 * Bug fix: the original body was empty, so control fell off the end of a
 * non-void function -- undefined behavior when the caller uses the result. */
void* Osal_stubCsEnter (void)
{
    return NULL;
}
/* Stub critical-section exit: would release the semaphore identified by
 * 'CsHandle', but nothing is acquired by the matching enter stub. */
void Osal_stubCsExit (void *CsHandle)
{
    (void)CsHandle;  /* unused in the stub implementation */
}
+