parent 9e5c22d
author    Raghu Nambiath <rnambiath@ti.com>  Mon, 11 Jun 2012 22:13:14 +0000 (18:13 -0400)
committer Raghu Nambiath <rnambiath@ti.com>  Mon, 11 Jun 2012 22:13:14 +0000 (18:13 -0400)
16 files changed:
index d1dd141bad5cda56fe06e6fc4ba491136285e074..82795e4e436b10fcc4b0ab8337f7247050f307e9 100755 (executable)
-/**************************************************************\r
- * FILE PURPOSE : -----------NETAPI-------------\r
- * user space access to transport resources on SOC\r
- **************************************************************\r
- * @file netapi.h\r
- * \r
- * @brief DESCRIPTION: netapi main header file for user space transport\r
- * library\r
- * \r
- * REVISION HISTORY: rev 0.0.1 \r
- *\r
- * Copyright (c) Texas Instruments Incorporated 2010-2011\r
- * \r
- * Redistribution and use in source and binary forms, with or without \r
- * modification, are permitted provided that the following conditions \r
- * are met:\r
- *\r
- * Redistributions of source code must retain the above copyright \r
- * notice, this list of conditions and the following disclaimer.\r
- *\r
- * Redistributions in binary form must reproduce the above copyright\r
- * notice, this list of conditions and the following disclaimer in the \r
- * documentation and/or other materials provided with the \r
- * distribution.\r
- *\r
- * Neither the name of Texas Instruments Incorporated nor the names of\r
- * its contributors may be used to endorse or promote products derived\r
- * from this software without specific prior written permission.\r
- *\r
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \r
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT \r
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT \r
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, \r
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT \r
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \r
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \r
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
-*****************************************************************************/\r
-\r
-/** @mainpage Network API \r
- *\r
- * @section intro Introduction\r
- *\r
- * The network API provides a user space interface to TI SOC transport \r
- * Resources. The library includes:\r
- * - general startup and setup for user space operations\r
- * - memory heap and packet buffer management\r
- * - pktio either to/from network or internal queues\r
- * - timers for network stacks\r
- * - netcp (network co-processor) configuration and control\r
- * - utilities including user space synchronization primitivies\r
- * - sample scheduling event loop\r
- *\r
- * NETAPI allows user space transport to configure control the NETCP:\r
- * - Classification of packets based on L2: MAC header fields\r
- * - Classification of packets based on L3: IP header fields\r
- * - Routing of packets to host based on L4 UDP or L5 GTPU ID\r
- * - Unidirectional IPSec SA creation and deletion\r
- * - Unidirectional IPSec Security Policy creation and deletion\r
- *\r
- * \par\r
- * NOTE:\r
- * (C) Copyright 2010-2011 Texas Instruments, Inc.\r
- * \par\r
- */\r
-\r
-/* Define NETAPI as a master group in Doxygen format and add all NETAPI \r
- definitions to this group. */\r
-/** @defgroup netapi USERSPACE TRANSPORT NETAPI\r
- * @{\r
- */\r
-/** @} */\r
-\r
-\r
-#ifndef __NETAPI__H\r
-#define __NETAPI__H\r
-#include <stdint.h>\r
-#include <stdlib.h>\r
-#include <stddef.h>\r
-#include <string.h>\r
-\r
-/**\r
- * @defgroup netapi_structures NETAPI data structures\r
- */\r
-/** @ingroup netapi */\r
-\r
-/** @defgroup netapi_api_functions NETAPI API's\r
- * @ingroup netapi_api_functions\r
- */\r
-\r
-/** @ingroup netapi_structures */\r
-/**\r
- * @def NETAPI_T\r
- * @brief netapi handle: one per thread\r
- * used in most NETAPI calls\r
- */\r
-typedef void * NETAPI_T;\r
-\r
-\r
-#define NETAPI_SYS_MASTER 2 //master for system\r
-#define NETAPI_CORE_MASTER 1 //master for core\r
-#define NETAPI_NO_MASTER 0 //data only\r
-\r
-/***********************************************\r
-*************RUN TIME CONTROLS*****************\r
-***********************************************/\r
-typedef struct NETAPI_CFG_Tag\r
-{\r
- int def_mem_size; //bytes of CMA memory we have allocated \r
- int def_flow_pkt_rx_offset; //offset in pkt buffer for hw to start RX\r
- int def_max_descriptors; //# of descriptors in system (must be power of 2), 2^14 max\r
- int def_tot_descriptors_for_us; //#of descriptors to create in our region (must be power of 2)\r
- int def_heap_n_descriptors; //# descriptors+buffers in default heap\r
- int def_heap_n_zdescriptors; //# zero len descriptors in defaule heap\r
- int def_heap_buf_size; //size of buffers in default heap\r
-}NETAPI_CFG_T;\r
-\r
-\r
-\r
-#include "netapi_err.h"\r
-#include "netapi_tune.h"\r
-#include "ti/runtime/pktlib/pktlib_osal.h"\r
-#include "ti/runtime/pktlib/pktlib.h"\r
-#include "pktio.h"\r
-#include "ti/drv/pa/pa.h"\r
-#include "netcp_cfg.h"\r
-#include "netapi_sec.h"\r
-#include "netapi_sched.h"\r
-#include "src/netapi_vm.h"\r
-#include "src/netapi_util.h"\r
-#include "netsync.h"\r
-#include "ti/drv/nwal/nwal.h"\r
-#include "netapi_timer.h"\r
-#include "src/netapi_loc.h"\r
-\r
-/************************************************\r
- **********BUILD TIME CONTROLS *****************\r
- ***********************************************/\r
-/* see netapi_tune.h */\r
-\r
-\r
-/*************************************\r
- **************NETAPI****************\r
- ************************************/\r
-\r
-/** @ingroup netapi_api_functions */\r
-\r
-/*\r
-* @brief API instantiates the NETAPI and allocated global resources and is pre-requisite \r
- *\r
- * @details Allocates global resources valid per system level common across all ARM cores \r
- * or per thread based on "master" argument.\r
- * Intializes the following substems:\r
- * - pktio\r
- * - pktlib\r
- * - qmss\r
- * - cppi\r
- * - nwal\r
- * @param[in] master mode: NETAPI_SYS_MASTER or NETAPI_NO_MASTER\r
- * @param[in] configuration (master mode). pointer to NETAPI_CFG_T above or NULL\r
- * @retval @ref NETAPI_T: handle to the instance or NULL on error \r
- * @pre none \r
- */\r
-NETAPI_T netapi_init(int master, NETAPI_CFG_T * p_cfg);\r
-\r
-\r
-/** @ingroup netapi_api_functions */\r
-/*\r
-* @brief API shutdowns a previously intialized NETAPI instance \r
- *\r
- * @details de-llocates global resources valid per system level common across all ARM cores \r
- * or per thread based on "master" argument passed in at init time.\r
- * @param[in] @ref NETAPI_T: handle to the instance \r
- * @retval none \r
- * @pre @ref netapi_init \r
- */\r
-void netapi_shutdown(NETAPI_T p);\r
-\r
-/** @ingroup netapi_api_functions */\r
-/*\r
-\r
-* @brief API returns a @ref Pktlib_HeapIfTable to use when creating pktlib heaps \r
- *\r
- * @details Application will need a heapIfTable in order to create its own heaps. This\r
- * function returns a table that can be passed in the call to @ref Pktlib_CreateHeap\r
-* The memory used for these heaps is special: \r
- * - specific alignment, \r
- * - must be contguous, \r
- * - must have a physical to virtual mapping that\r
- * - is known by NETAPI. \r
- * Thus it must be completely managed by NETAPI. This interfaced table provides a\r
- * malloc function that the pktlib heap library uses to allocate data for the heap\r
- * buffers.\r
- * @param[in] none \r
- * @retval @ref Pktlib_HeapIfTable pointer \r
- * @pre @ref netapi_init \r
- */\r
-Pktlib_HeapIfTable *netapi_getPktlibIfTable(void) ;\r
-\r
-/** utilities to see how much mem/descriptor space is remaining **/\r
-\r
-/** @ingroup netapi_api_functions */\r
-/*\r
-* @brief API is used to return the amount of free memory available for allocating buffers\r
- ( for additonal Pktlib heaps. \r
- * @details the applicaiton can use this API to determine how much free memory is .\r
- * available for heap buffers if it decides to create its own. \r
- * @param[in] void \r
- * @retval int : amount of memory available for heap buffer storage (in bytes)\r
- * @pre @ref netapi_init \r
- */\r
-int netapi_getBufMemRemainder(void);\r
-/** @ingroup netapi_api_functions */\r
-\r
-/*\r
-* @brief API is used to return the amount of free memory available for allocating Descriptors \r
- ( for additonal Pktlib heaps. \r
- * @details the applicaiton can use this API to determine how much free memory is .\r
- * available for heap descriptors if it decides to create its own heap. \r
- * @param[in] void \r
- * @retval int : amount of memory available for heap descriptor storage (in bytes)\r
- * @pre @ref netapi_init \r
- */\r
-int netapi_getDescRemainder(void);\r
-\r
-/* utility to get default flow */\r
-/** @ingroup netapi_api_functions */\r
-/*\r
-* @brief API is used to return the default NETCP flow that is to be used for received \r
- ( packets..\r
- * @details the applicaiton can use this API to return the default NETCP flow that is used \r
- * for received packets. A NETCP flow is a list of PacketLib Heaps that are to be\r
- * used to supply free packets to the receive DMA function.\r
- * @param[in] @ref NETAPI_T handle to NETAPI instance \r
- * @retval NETCP_CFG_FLOW_HANDLE_T : handle to default flow\r
- * @pre @ref netapi_init \r
- */\r
-static inline NETCP_CFG_FLOW_HANDLE_T netapi_getDefaultFlow(NETAPI_T p) {\r
-return NETCP_DEFAULT_FLOW;\r
-}\r
-\r
-/* utility to get default route */\r
-/** @ingroup netapi_api_functions */\r
-/*\r
-* @brief API is used to return the default NETCP route\r
- *\r
- * @details this functions returns the default NETCP route created by @ref netapi_init. \r
- * A netcp route consists of a NETCP flow plus a destination pktio channel \r
- * @param[in] @ref NETAPI_T handle to NETAPI instance \r
- * @retval NETCP_CFG_ROUTE_HANDLE_T: the handle of the default route. \r
- * @pre @ref netapi_init \r
- */\r
-static inline NETCP_CFG_ROUTE_HANDLE_T netapi_getDefaultRoute(NETAPI_T p) {\r
-return NETCP_DEFAULT_ROUTE;\r
-}\r
-\r
-/* utility to set/get a cookie in the netapi handle */\r
-/** @ingroup netapi_api_functions */\r
-/*\r
-* @brief API is used to return a piece of application-provided opaque data that has been \r
- ( stored in the netapi instance.\r
- * @details the applicaiton can save a pointer to opaque data in the @ref NETAPI_T instance.\r
- * This APi lets this data be returned to the application.\r
- * @param[in] @ref NETAPI_T handle to NETAPI instance \r
- * @retval void * \r
- * @pre @ref netapi_init @ref netapi_setCookie\r
- */\r
-static inline void * netapi_getCookie(NETAPI_T p)\r
-{\r
-NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;\r
-return pp->cookie;\r
-}\r
-\r
-/** @ingroup netapi_api_functions */\r
-/*\r
-* @brief API is used to set a piece of application-provided opaque data in the\r
- ( netapi instance.\r
- * @details the applicaiton can save a pointer to opaque data in the @ref NETAPI_T instance.\r
- * This APi can be returned later to the application via @ref netapi_getCookie\r
- * @param[in] @ref NETAPI_T : handle to NETAPI instance \r
- * @param[in] void * : opaque data to be saved\r
- * @retval void \r
- * @pre @ref netapi_init \r
- */\r
-static inline void netapi_setCookie(NETAPI_T p, void * cookie)\r
-{\r
-NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;\r
-pp->cookie= cookie;\r
-}\r
-\r
-\r
-/** @ingroup netapi_api_functions */\r
-/*\r
-* @brief API is used to poll for NETCP configuration response messages. \r
- *\r
- * @details Application, if controlling the scheduler, will need to call this\r
- * function periodically to check for NETCP configuration responses (eg\r
- * statistics requests). \r
- * @param[in] @ref NETAPI_T handle to NETAPI instance \r
- * @retval none \r
- * @pre @ref netapi_init \r
- */\r
-void netapi_netcpPoll(NETAPI_T p);\r
-\r
-//heap registration for polling purposes\r
-\r
-/** @ingroup netapi_api_functions */\r
-/*\r
-* @brief API is used to register a heap that is created by application so that\r
- * it's garbage queue can be polled automatically by @ref netapi_poll_heapGarbage(). \r
- *\r
- * @details this function registers an application-created heap with the netapi instance\r
- * so that it can add that heap's garbage queue to the garbage poll function. \r
- * NOTE: netapi internal heap is automatically registered\r
- * @param[in] @ref NETAPI_T handle to NETAPI instance\r
- * @param[in] @ref Pktlib_HeapHandle: handle of heap to register\r
- * @retval int : 1 if OK, <0 on error \r
- * @pre @ref netapi_init @ref Pktlib_CreateHeap\r
- */\r
-static inline int netapi_registerHeap(NETAPI_T p, Pktlib_HeapHandle h)\r
-{\r
-NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;\r
-int i;\r
-for(i=0;i<TUNE_NETAPI_MAX_HEAPS;i++)\r
-{\r
- if (!pp->createdHeaps[i]) {pp->createdHeaps[i]=h; return 1;}\r
-}\r
-return -1; //no room\r
-}\r
-\r
-/** @ingroup netapi_api_functions */\r
-/*\r
-* @brief API is used to un-register a heap that was created by application and previously\r
-* registerd so that\r
- * it's garbage queue could be polled automatically by @ref netapi_poll_heapGarbage(). \r
- *\r
- * @details this function un-registers an application-created heap with the netapi instance\r
- * @param[in] @ref NETAPI_T: handle to NETAPI instance \r
- * @param[in] @ref Pktlib_HeapHandle : handle to heap \r
- * @retval <0 if err, 1 if OK\r
- * @pre @ref netapi_init , @ref Pktlib_CreateHeap, @ref netai_registerHeap()\r
- */\r
-static inline int netapi_unregisterHeap(NETAPI_T p, Pktlib_HeapHandle h)\r
-{\r
-NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;\r
-int i;\r
-for(i=0;i<TUNE_NETAPI_MAX_HEAPS;i++)\r
-{\r
- if (pp->createdHeaps[i] == h) {pp->createdHeaps[i]=NULL; return 1;}\r
-}\r
-return -1; //not found\r
-}\r
-\r
-/** @ingroup netapi_api_functions */\r
-/*\r
-* @brief API is used to remove a created pktlib heap\r
- *\r
- * @details this function removes anapplication-created heap with the netapi instance\r
- * [note -> descriptors are zapped and cannot be reused]\r
- * @param[in] @ref NETAPI_T: handle to NETAPI instance \r
- * @param[in] @ref Pktlib_HeapHandle : handle to heap \r
- * @retval <0 if err, 1 if OK\r
- * @pre @ref netapi_init , @ref Pktlib_CreateHeap, @ref netai_registerHeap()\r
- */\r
-\r
-int netapi_closeHeap(NETAPI_T p, Pktlib_HeapHandle h);\r
-\r
-/** @ingroup netapi_api_functions */\r
-/*\r
-* @brief API is used to poll the garbage collection queue for the internal NETAPI heaps and \r
- * any application created heaps\r
- *\r
- * @details this function is used to poll the netapi internal heaps and any \r
- * application-created heaps that have been registered with the netapi instance. The\r
- * poll function checks the garbage collection queue associated with the heap and returns\r
- * descriptors and buffers when appropriate to the main free queue.\r
- * @param[in] @ref NETAPI_T handle to NETAPI instance \r
- * @retval none \r
- * @pre @ref netapi_init @ref pktlib_CreateHeap\r
- */\r
-void netapi_poll_heapGarbage(NETAPI_T p);\r
-\r
-#endif\r
+/**************************************************************
+ * FILE PURPOSE : -----------NETAPI-------------
+ * user space access to transport resources on SOC
+ **************************************************************
+ * @file netapi.h
+ *
+ * @brief DESCRIPTION: netapi main header file for user space transport
+ * library
+ *
+ * REVISION HISTORY: rev 0.0.1
+ *
+ * Copyright (c) Texas Instruments Incorporated 2010-2011
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+/** @mainpage Network API
+ *
+ * @section intro Introduction
+ *
+ * The network API provides a user space interface to TI SOC transport
+ * Resources. The library includes:
+ * - general startup and setup for user space operations
+ * - memory heap and packet buffer management
+ * - pktio either to/from network or internal queues
+ * - timers for network stacks
+ * - netcp (network co-processor) configuration and control
+ *  -  utilities including user space synchronization primitives
+ * - sample scheduling event loop
+ *
+ * NETAPI allows user space transport to configure and control the NETCP:
+ * - Classification of packets based on L2: MAC header fields
+ * - Classification of packets based on L3: IP header fields
+ * - Routing of packets to host based on L4 UDP or L5 GTPU ID
+ * - Unidirectional IPSec SA creation and deletion
+ * - Unidirectional IPSec Security Policy creation and deletion
+ *
+ * \par
+ * NOTE:
+ * (C) Copyright 2010-2011 Texas Instruments, Inc.
+ * \par
+ */
+
+/* Define NETAPI as a master group in Doxygen format and add all NETAPI
+ definitions to this group. */
+/** @defgroup netapi USERSPACE TRANSPORT NETAPI
+ * @{
+ */
+/** @} */
+
+
+#ifndef __NETAPI__H
+#define __NETAPI__H
+#include <stdint.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+
+/**
+ * @defgroup netapi_structures NETAPI data structures
+ */
+/** @ingroup netapi */
+
+/** @defgroup netapi_api_functions NETAPI APIs
+ * @ingroup netapi
+ */
+
+/** @ingroup netapi_structures */
+/**
+ * @def NETAPI_T
+ * @brief netapi handle: one per thread
+ * used in most NETAPI calls
+ */
+typedef void * NETAPI_T;
+
+
+#define NETAPI_SYS_MASTER 2 //master for system
+#define NETAPI_CORE_MASTER 1 //master for core
+#define NETAPI_NO_MASTER 0 //data only
+
+/***********************************************
+*************RUN TIME CONTROLS*****************
+***********************************************/
+typedef struct NETAPI_CFG_Tag
+{
+ int def_mem_size; //bytes of CMA memory we have allocated
+ int def_flow_pkt_rx_offset; //offset in pkt buffer for hw to start RX
+ int def_max_descriptors; //# of descriptors in system (must be power of 2), 2^14 max
+ int def_tot_descriptors_for_us; //#of descriptors to create in our region (must be power of 2)
+ int def_heap_n_descriptors; //# descriptors+buffers in default heap
+ int def_heap_n_zdescriptors; //# zero len descriptors in default heap
+ int def_heap_buf_size; //size of buffers in default heap
+}NETAPI_CFG_T;
+
+
+
+#include "netapi_err.h"
+#include "netapi_tune.h"
+#include "ti/runtime/pktlib/pktlib_osal.h"
+#include "ti/runtime/pktlib/pktlib.h"
+#include "pktio.h"
+#include "ti/drv/pa/pa.h"
+#include "netcp_cfg.h"
+#include "netapi_sec.h"
+#include "netapi_sched.h"
+#include "src/netapi_vm.h"
+#include "src/netapi_util.h"
+#include "netsync.h"
+#include "ti/drv/nwal/nwal.h"
+#include "netapi_timer.h"
+#include "src/netapi_loc.h"
+
+/************************************************
+ **********BUILD TIME CONTROLS *****************
+ ***********************************************/
+/* see netapi_tune.h */
+
+
+/*************************************
+ **************NETAPI****************
+ ************************************/
+
+/** @ingroup netapi_api_functions */
+
+/*
+* @brief API instantiates the NETAPI, allocates global resources, and is a prerequisite for all other NETAPI calls
+ *
+ * @details Allocates global resources that are valid system-wide (common across all ARM cores)
+ *          or per thread, based on the "master" argument.
+ *          Initializes the following subsystems:
+ * - pktio
+ * - pktlib
+ * - qmss
+ * - cppi
+ * - nwal
+ * @param[in] master: mode, NETAPI_SYS_MASTER or NETAPI_NO_MASTER
+ * @param[in] p_cfg: pointer to a NETAPI_CFG_T configuration (used in master mode) or NULL
+ * @retval @ref NETAPI_T: handle to the instance or NULL on error
+ * @pre none
+ */
+NETAPI_T netapi_init(int master, NETAPI_CFG_T * p_cfg);
+
+
+/** @ingroup netapi_api_functions */
+/*
+* @brief API shuts down a previously initialized NETAPI instance
+ *
+ * @details De-allocates the global resources that are valid system-wide (common across all ARM cores)
+ *          or per thread, based on the "master" argument passed in at init time.
+ * @param[in] @ref NETAPI_T: handle to the instance
+ * @retval none
+ * @pre @ref netapi_init
+ */
+void netapi_shutdown(NETAPI_T p);
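
/* Illustrative usage sketch (not part of the original header): a minimal
 * bring-up and tear-down sequence using netapi_init/netapi_shutdown above.
 * The function name and all numeric tuning values are placeholders chosen
 * only to satisfy the constraints documented in NETAPI_CFG_T (powers of 2,
 * 2^14 max), not recommended settings. */
static inline int example_netapi_lifecycle(void)
{
    NETAPI_CFG_T cfg;
    NETAPI_T     h;

    memset(&cfg, 0, sizeof(cfg));
    cfg.def_mem_size               = 32 * 1024 * 1024; /* CMA bytes (placeholder) */
    cfg.def_flow_pkt_rx_offset     = 0;                /* no RX offset in the buffer */
    cfg.def_max_descriptors        = 16384;            /* power of 2, 2^14 max */
    cfg.def_tot_descriptors_for_us = 8192;             /* power of 2 */
    cfg.def_heap_n_descriptors     = 128;
    cfg.def_heap_n_zdescriptors    = 64;
    cfg.def_heap_buf_size          = 1600;

    h = netapi_init(NETAPI_SYS_MASTER, &cfg);  /* per the @param note, p_cfg may also be NULL */
    if (h == NULL)
        return -1;                             /* init failed */

    /* ... create pktio channels, configure NETCP, run the event loop ... */

    netapi_shutdown(h);                        /* release global resources */
    return 0;
}
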
+
+/** @ingroup netapi_api_functions */
+/*
+
+* @brief API returns a @ref Pktlib_HeapIfTable to use when creating pktlib heaps
+ *
+ * @details The application will need a heapIfTable in order to create its own heaps. This
+ *          function returns a table that can be passed in the call to @ref Pktlib_CreateHeap.
+ *          The memory used for these heaps is special:
+ *    - specific alignment,
+ *    - must be contiguous,
+ *    - must have a physical to virtual mapping that is known by NETAPI.
+ * Thus it must be completely managed by NETAPI. This interface table provides a
+ * malloc function that the pktlib heap library uses to allocate data for the heap
+ * buffers.
+ * @param[in] none
+ * @retval @ref Pktlib_HeapIfTable pointer
+ * @pre @ref netapi_init
+ */
+Pktlib_HeapIfTable *netapi_getPktlibIfTable(void) ;
+
+/** utilities to see how much mem/descriptor space is remaining **/
+
+/** @ingroup netapi_api_functions */
+/*
+* @brief API is used to return the amount of free memory available for allocating buffers
+ *       for additional Pktlib heaps.
+ * @details The application can use this API to determine how much free memory is
+ *          available for heap buffers if it decides to create its own heap.
+ * @param[in] void
+ * @retval int : amount of memory available for heap buffer storage (in bytes)
+ * @pre @ref netapi_init
+ */
+int netapi_getBufMemRemainder(void);
+/** @ingroup netapi_api_functions */
+
+/*
+* @brief API is used to return the amount of free memory available for allocating descriptors
+ *       for additional Pktlib heaps.
+ * @details The application can use this API to determine how much free memory is
+ *          available for heap descriptors if it decides to create its own heap.
+ * @param[in] void
+ * @retval int : amount of memory available for heap descriptor storage (in bytes)
+ * @pre @ref netapi_init
+ */
+int netapi_getDescRemainder(void);
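
/* Illustrative usage sketch (not part of the original header): before creating
 * an additional pktlib heap, an application can fetch the NETAPI-managed heap
 * interface table and check how much buffer and descriptor memory remains.
 * The function name and the "needed_*" parameters are hypothetical; the actual
 * Pktlib_CreateHeap call is deliberately omitted (see the pktlib API). */
static inline int example_check_heap_headroom(int needed_buf_bytes, int needed_desc_bytes)
{
    Pktlib_HeapIfTable *if_table  = netapi_getPktlibIfTable(); /* malloc hook used for heap buffers */
    int                 buf_left  = netapi_getBufMemRemainder(); /* bytes left for heap buffers */
    int                 desc_left = netapi_getDescRemainder();   /* bytes left for descriptors */

    if (if_table == NULL || buf_left < needed_buf_bytes || desc_left < needed_desc_bytes)
        return 0;  /* not enough NETAPI-managed memory for another heap */
    return 1;      /* enough headroom; the heap could be created with this if_table */
}
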
+
+/* utility to get default flow */
+/** @ingroup netapi_api_functions */
+/*
+* @brief API is used to return the default NETCP flow that is to be used for received
+ *       packets.
+ * @details The application can use this API to return the default NETCP flow that is used
+ *          for received packets. A NETCP flow is a list of Pktlib heaps that are to be
+ * used to supply free packets to the receive DMA function.
+ * @param[in] @ref NETAPI_T handle to NETAPI instance
+ * @retval NETCP_CFG_FLOW_HANDLE_T : handle to default flow
+ * @pre @ref netapi_init
+ */
+static inline NETCP_CFG_FLOW_HANDLE_T netapi_getDefaultFlow(NETAPI_T p) {
+return NETCP_DEFAULT_FLOW;
+}
+
+/* utility to get default route */
+/** @ingroup netapi_api_functions */
+/*
+* @brief API is used to return the default NETCP route
+ *
+ * @details this functions returns the default NETCP route created by @ref netapi_init.
+ * A netcp route consists of a NETCP flow plus a destination pktio channel
+ * @param[in] @ref NETAPI_T handle to NETAPI instance
+ * @retval NETCP_CFG_ROUTE_HANDLE_T: the handle of the default route.
+ * @pre @ref netapi_init
+ */
+static inline NETCP_CFG_ROUTE_HANDLE_T netapi_getDefaultRoute(NETAPI_T p) {
+return NETCP_DEFAULT_ROUTE;
+}
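
/* Illustrative usage sketch (not part of the original header): fetching the
 * default flow and default route created by netapi_init, for example to pass
 * to NETCP classification rules that should deliver matching packets via the
 * default path. The function name is hypothetical. */
static inline void example_get_defaults(NETAPI_T h,
                                        NETCP_CFG_FLOW_HANDLE_T  *p_flow,
                                        NETCP_CFG_ROUTE_HANDLE_T *p_route)
{
    *p_flow  = netapi_getDefaultFlow(h);   /* default RX free-buffer flow */
    *p_route = netapi_getDefaultRoute(h);  /* default flow plus destination pktio channel */
}
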
+
+/* utility to set/get a cookie in the netapi handle */
+/** @ingroup netapi_api_functions */
+/*
+* @brief API is used to return a piece of application-provided opaque data that has been
+ *       stored in the netapi instance.
+ * @details The application can save a pointer to opaque data in the @ref NETAPI_T instance.
+ *          This API lets that data be returned to the application.
+ * @param[in] @ref NETAPI_T handle to NETAPI instance
+ * @retval void *
+ * @pre @ref netapi_init @ref netapi_setCookie
+ */
+static inline void * netapi_getCookie(NETAPI_T p)
+{
+NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;
+return pp->cookie;
+}
+
+/** @ingroup netapi_api_functions */
+/*
+* @brief API is used to set a piece of application-provided opaque data in the
+ *       netapi instance.
+ * @details The application can save a pointer to opaque data in the @ref NETAPI_T instance.
+ *          This data can be returned later to the application via @ref netapi_getCookie.
+ * @param[in] @ref NETAPI_T : handle to NETAPI instance
+ * @param[in] void * : opaque data to be saved
+ * @retval void
+ * @pre @ref netapi_init
+ */
+static inline void netapi_setCookie(NETAPI_T p, void * cookie)
+{
+NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;
+pp->cookie= cookie;
+}
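
/* Illustrative usage sketch (not part of the original header): stashing an
 * application context structure in the NETAPI instance and recovering it
 * later, e.g. inside a pktio receive callback. MY_APP_CTX_T and the function
 * name are hypothetical application-side names, not part of NETAPI. */
typedef struct { int my_state; } MY_APP_CTX_T;

static inline void example_cookie_usage(NETAPI_T h, MY_APP_CTX_T *ctx)
{
    MY_APP_CTX_T *back;

    netapi_setCookie(h, (void *)ctx);                /* save opaque pointer in the instance */
    back = (MY_APP_CTX_T *) netapi_getCookie(h);     /* retrieve it later */
    back->my_state++;                                /* use the recovered context */
}
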
+
+
+/** @ingroup netapi_api_functions */
+/*
+* @brief API is used to poll for NETCP configuration response messages.
+ *
+ * @details The application, if controlling the scheduler, will need to call this
+ *          function periodically to check for NETCP configuration responses (e.g.
+ *          statistics requests).
+ * @param[in] @ref NETAPI_T handle to NETAPI instance
+ * @retval none
+ * @pre @ref netapi_init
+ */
+void netapi_netcpPoll(NETAPI_T p);
+
+//heap registration for polling purposes
+
+/** @ingroup netapi_api_functions */
+/*
+* @brief API is used to register a heap that is created by the application so that
+ * its garbage queue can be polled automatically by @ref netapi_poll_heapGarbage().
+ *
+ * @details This function registers an application-created heap with the netapi instance
+ *          so that netapi can add that heap's garbage queue to the garbage poll function.
+ *          NOTE: the netapi internal heap is automatically registered.
+ * @param[in] @ref NETAPI_T handle to NETAPI instance
+ * @param[in] @ref Pktlib_HeapHandle: handle of heap to register
+ * @retval int : 1 if OK, <0 on error
+ * @pre @ref netapi_init @ref Pktlib_CreateHeap
+ */
+static inline int netapi_registerHeap(NETAPI_T p, Pktlib_HeapHandle h)
+{
+NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;
+int i;
+for(i=0;i<TUNE_NETAPI_MAX_HEAPS;i++)
+{
+ if (!pp->createdHeaps[i]) {pp->createdHeaps[i]=h; return 1;}
+}
+return -1; //no room
+}
+
+/** @ingroup netapi_api_functions */
+/*
+* @brief API is used to un-register a heap that was created by the application and previously
+* registered so that
+ * its garbage queue could be polled automatically by @ref netapi_poll_heapGarbage().
+ *
+ * @details This function un-registers an application-created heap from the netapi instance.
+ * @param[in] @ref NETAPI_T: handle to NETAPI instance
+ * @param[in] @ref Pktlib_HeapHandle : handle to heap
+ * @retval <0 if err, 1 if OK
+ * @pre       @ref netapi_init , @ref Pktlib_CreateHeap, @ref netapi_registerHeap()
+ */
+static inline int netapi_unregisterHeap(NETAPI_T p, Pktlib_HeapHandle h)
+{
+NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;
+int i;
+for(i=0;i<TUNE_NETAPI_MAX_HEAPS;i++)
+{
+ if (pp->createdHeaps[i] == h) {pp->createdHeaps[i]=NULL; return 1;}
+}
+return -1; //not found
+}
+
+/** @ingroup netapi_api_functions */
+/*
+* @brief API is used to remove a created pktlib heap
+ *
+ * @details This function removes an application-created heap from the netapi instance
+ *           [note: the heap's descriptors are zapped and cannot be reused].
+ * @param[in] @ref NETAPI_T: handle to NETAPI instance
+ * @param[in] @ref Pktlib_HeapHandle : handle to heap
+ * @retval <0 if err, 1 if OK
+ * @pre       @ref netapi_init , @ref Pktlib_CreateHeap, @ref netapi_registerHeap()
+ */
+
+int netapi_closeHeap(NETAPI_T p, Pktlib_HeapHandle h);
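
/* Illustrative usage sketch (not part of the original header): registering an
 * application-created pktlib heap for automatic garbage polling, then later
 * un-registering and closing it. The function name is hypothetical and the
 * heap handle 'h_heap' is assumed to come from the application's own
 * Pktlib_CreateHeap call (not shown). */
static inline void example_heap_registration(NETAPI_T h, Pktlib_HeapHandle h_heap)
{
    if (netapi_registerHeap(h, h_heap) < 0) {
        /* all TUNE_NETAPI_MAX_HEAPS slots in use; heap will not be garbage-polled */
        return;
    }

    /* ... the heap is now swept by netapi_poll_heapGarbage() along with the internal heaps ... */

    netapi_unregisterHeap(h, h_heap);   /* stop polling its garbage queue */
    netapi_closeHeap(h, h_heap);        /* descriptors are zapped and cannot be reused */
}
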
+
+/** @ingroup netapi_api_functions */
+/*
+* @brief API is used to poll the garbage collection queue for the internal NETAPI heaps and
+ * any application created heaps
+ *
+ * @details This function is used to poll the netapi internal heaps and any
+ *          application-created heaps that have been registered with the netapi instance. The
+ *          poll function checks the garbage collection queue associated with each heap and returns
+ *          descriptors and buffers to the main free queue when appropriate.
+ * @param[in] @ref NETAPI_T handle to NETAPI instance
+ * @retval none
+ * @pre       @ref netapi_init @ref Pktlib_CreateHeap
+ */
+void netapi_poll_heapGarbage(NETAPI_T p);
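
/* Illustrative usage sketch (not part of the original header): a housekeeping
 * loop for an application that drives its own scheduler instead of using the
 * netapi_sched sample. The function name and 'app_keep_running' flag are
 * hypothetical application-side names. */
static inline void example_housekeeping_loop(NETAPI_T h, volatile int *app_keep_running)
{
    while (*app_keep_running) {
        netapi_netcpPoll(h);          /* pick up NETCP configuration responses (e.g. statistics) */
        netapi_poll_heapGarbage(h);   /* recycle descriptors/buffers from heap garbage queues */
        /* ... application work: pktio polling, timers, etc. ... */
    }
}
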
+
+#endif
index ef7bc775a2a3e3534631b0f374c3d36da7cd6adf..7ee7f7511f458584e6d209bdfec7c2dd8b868c4a 100755 (executable)
-/*********************************\r
- *FILE: pktio.h\r
- *PURPOSE: pktio library header\r
- **************************************************************\r
- * @file pktio.h\r
- * \r
- * @bried DESCRIPTION: pktio module main header file for user space transport\r
- * library\r
- * \r
- * REVISION HISTORY: rev 0.0.1 \r
- *\r
- * Copyright (c) Texas Instruments Incorporated 2010-2011\r
- * \r
- * Redistribution and use in source and binary forms, with or without \r
- * modification, are permitted provided that the following conditions \r
- * are met:\r
- *\r
- * Redistributions of source code must retain the above copyright \r
- * notice, this list of conditions and the following disclaimer.\r
- *\r
- * Redistributions in binary form must reproduce the above copyright\r
- * notice, this list of conditions and the following disclaimer in the \r
- * documentation and/or other materials provided with the \r
- * distribution.\r
- *\r
- * Neither the name of Texas Instruments Incorporated nor the names of\r
- * its contributors may be used to endorse or promote products derived\r
- * from this software without specific prior written permission.\r
- *\r
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \r
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT \r
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT \r
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, \r
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT \r
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \r
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \r
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
-\r
- ********************************/\r
-#ifndef __PKTIO__H\r
-#define __PKTIO__H\r
-#include "netapi.h"\r
-#include "ti/runtime/pktlib/pktlib.h"\r
-#include "ti/drv/nwal/nwal.h"\r
-#include "netapi_err.h"\r
-\r
-/*--------------------defines-----------------------*/\r
-#define PKTIO_NOMEM NETAPI_ERR_NOMEM\r
-//default pktio channels \r
-#define NETCP_TX "NETCP_TX"\r
-#define NETCP_RX "NETCP_RX"\r
-#define NETCP_SB_RX "NETCP_SB_RX"\r
-#define NETCP_SB_TX "NETCP_SB_TX"\r
-#define PKTIO_MAX_NAME 19 \r
-\r
-/*--------------------data structures----------------*/\r
-typedef struct PKTIO_METADATA_Tag\r
-{\r
- int flags1;\r
-#define PKTIO_META_RX 0x01\r
-#define PKTIO_META_TX 0x02\r
-#define PKTIO_META_SB_RX 0x4 //SB crypto rx \r
-#define PKTIO_META_SB_TX 0x8 //SB crypto tx\r
-#define PKTIO_META_APP_DEF 0x80000000\r
- union\r
- {\r
- nwalRxPktInfo_t * rx_meta;\r
- nwalTxPktInfo_t * tx_meta;\r
- nwalDmRxPayloadInfo_t * rx_sb_meta;\r
- nwalDmTxPayloadInfo_t * tx_sb_meta;\r
- } u;\r
- void * sa_handle; //valid for PKTIO_META_TX with IPSEC inflow or PKTIO_PKTIO_META_SB_TX . \r
- // MUST BE nwal_HANDLE_INVALID otherwise\r
-} PKTIO_METADATA_T;\r
-\r
-/* the callback function */\r
-struct PKTIO_HANDLE_tag;\r
-\r
-//polling control\r
-typedef struct PKTIO_POLL_Tag\r
-{\r
-/* future */\r
-} PKTIO_POLL_T;\r
-\r
-#define PKTIO_MAX_RECV (TUNE_NETAPI_MAX_BURST_RCV)\r
-typedef void (*PKTIO_CB)(struct PKTIO_HANDLE_tag * channel, Ti_Pkt* p_recv[],\r
- PKTIO_METADATA_T p_meta[], int n_pkts,\r
- uint64_t ts );\r
-\r
-typedef int (*PKTIO_SEND)(struct PKTIO_HANDLE_tag * channel, Ti_Pkt* p_send,\r
- PKTIO_METADATA_T *p_meta, int * p_err);\r
-\r
-typedef int (*PKTIO_POLL)(struct PKTIO_HANDLE_tag * channel,PKTIO_POLL_T * p_poll_cfg ,\r
- int * p_err);\r
-\r
-/** channel configuration */\r
-#define PKTIO_NA 0\r
-typedef struct PKTIO_CFG_Tag\r
-{\r
-#define PKTIO_R 0x1\r
-#define PKTIO_W 0x2\r
-#define PKTIO_RW (PKTIO_R | PKTIO_W)\r
-int flags1;\r
-\r
-#define PKTIO_LOCAL 0x2 \r
-#define PKTIO_GLOBAL 0x1\r
-#define PKTIO_PKT 0x4 //define this if this channel is for NETCP \r
-#define PKTIO_SB 0x8 //define this if this channel is for sideband crypto \r
-int flags2;\r
-\r
-//for create\r
-#define PKTIO_Q_ANY -1\r
-int qnum;\r
-\r
-//for poll\r
-int max_n;\r
-}PKTIO_CFG_T;\r
-\r
-struct NETAPI_tag;\r
-\r
-/* a pktio channel .. */\r
-\r
-typedef struct PKTIO_HANDLE_Tag\r
-{\r
-#define PKTIO_INUSE 0xfeedfeed\r
- int inuse;\r
- int use_nwal; /* true if this is managed by nwal */\r
-#define PKTIO_4_IPC 0 //For IPC\r
-#define PKTIO_4_ADJ_NWAL 1 //(RX)app queues managed by NWAL\r
-#define PKTIO_DEF_NWAL 2 // default NWAL RX/TX queues\r
-#define PKTIO_4_ADJ_SB 3 //(RX) crypto side band app defined\r
-#define PKTIO_DEF_SB 4 //crypto side band default\r
- struct NETAPI_tag * back; /* back handle */\r
- void * nwalInstanceHandle; /* save here for conveninece */\r
- PKTIO_CB cb; /* callback for channel */\r
- PKTIO_CFG_T cfg; /* configuration */\r
- Qmss_QueueHnd q; /* the associated queue handle */\r
- Qmss_Queue qInfo; /* and its qm#/q# */\r
- int max_n; /* max # of pkts to read in one poll */\r
- void * cookie; /* app specific */\r
- PKTIO_SEND _send; /* pktio type specific send function */\r
- PKTIO_POLL _poll; /* pktio type specific POLL function */\r
- char name[PKTIO_MAX_NAME+1];\r
-} PKTIO_HANDLE_T;\r
-\r
-\r
-\r
-typedef struct PKTIO_CONTROL_Tag\r
-{\r
-#define PKTIO_CLEAR 0x1 //clear out the channel\r
-#define PKTIO_DIVERT 0x2 //divert, (to dest channel)\r
- int op;\r
- PKTIO_HANDLE_T *dest;\r
-} PKTIO_CONTROL_T;\r
-\r
-\r
-\r
-/*---------------------------------------------------*/\r
-/*-------------------------API-----------------------*/\r
-/*---------------------------------------------------*/\r
-\r
-/*\r
-* @brief API creates a NETAPI channel \r
- *\r
- * @details This assigns global resources to a NETAPI pktio channel.\r
- * Once created, the channel can be used to send and/or receive\r
- * a TI @ref Ti_Pkt. This can be used for communication with the \r
- * the Network co-processor (NETCP) or for internal inter-processor\r
- * communication. The channel is saved under the assigned name \r
- * and can be opened by other netapi threads instances.\r
- * @param[in] @ref NETAPI_T: handle to the instance \r
- * @param[in] char * name: a pointer to the char string name for channel\r
- * @param[in] @ref PKTIO_CB : callback to be issued on packet receive\r
- * @param[in] @ref PKTIO_CFG_T: pointer to channel configuration\r
- * @param[out] int * err: pointer to error return \r
- * @retval @ref PKTIO_HANDLE_T: handle to the pktio instance or NULL on error\r
- * @pre @ref netapi_init \r
- */\r
-PKTIO_HANDLE_T * pktio_create(NETAPI_T netapi_handle, /* netapi instance */\r
- char * name, /* name of the channel */\r
- PKTIO_CB cb, /* receive callback */\r
- PKTIO_CFG_T * p_cfg, /* ptr to config*/\r
- int * err);\r
-\r
-/*\r
-* @brief API opens an existing NETAPI channel\r
- *\r
- * @details This opens an NETAPI pktio channel for use. The channel\r
- * must have already been created via @ref pktio_create or may have\r
- * been created internally during the netapi intialization.\r
- * Once opened, the channel can be used to send and/or receive\r
- * a TI @ref Ti_Pkt. This can be used for communication with the \r
- * the Network co-processor (NETCP) or for internal inter-processor\r
- * communication. \r
- *\r
- * @param[in] @ref NETAPI_T: handle to the instance \r
- * @param[in] char * name: a pointer to the char string name for channel\r
- * @param[in] @ref PKTIO_CB : callback to be issued on packet receive\r
- * @param[in] @ref PKTIO_CFG_T: pointer to channel configuration\r
- * @param[out] int * err: pointer to error return\r
- * @retval @ref PKTIO_HANDLE_T: handle to the pktio instance or NULL on error\r
- * @pre @ref netapi_init , @ref pktio_create\r
- */\r
-PKTIO_HANDLE_T * pktio_open(NETAPI_T netapi_handle, /* netapi instance */\r
- char *name, /* name of channel to open */\r
- PKTIO_CB cb, /* receive callback */\r
- PKTIO_CFG_T *p_cfg, /* channel configuration */\r
- int * err);\r
-\r
-/* future: control the channel */\r
-void pktio_control(PKTIO_HANDLE_T * channel, //handle from open or create\r
- PKTIO_CB cb, //change the callback\r
- PKTIO_CFG_T * p_cfg,//optional \r
- PKTIO_CONTROL_T *p_control,//optional\r
- int *err);\r
- \r
-/* future: close or delete a pktio channel */\r
-void pktio_close(PKTIO_HANDLE_T * channel, int * err);\r
-void pktio_delete(PKTIO_HANDLE_T * channel, int * err);\r
-\r
-/*\r
-* @brief API sends data to a pktio channel \r
- *\r
- * @details This sends a @ref Ti_Pkt and associated meta data, \r
- * @ref PKTIO_METADATA_T to a channel. The channel\r
- * must have already been created via @ref pktio_create or opened\r
- * via @ref pktio_open. It may have\r
- * been created internally during the netapi intialization.\r
- * @param[in] @ref PKTIO_HANDLE_T: handle to the channel\r
- * @param[in] @ref Ti_Pkt*: pointer to the packet to send\r
- * @param[in] @ref PKTIO_METADATA_T: pointer to meta data associated with packet \r
- * @param[out] int * err: pointer to error return\r
- * @retval int npkts: 1 if packet sent, 0 if error \r
- * @pre @ref netapi_init, @ref pktio_create, @ref pktio_open\r
- */\r
-static inline int pktio_send(PKTIO_HANDLE_T * channel, /* the channel */\r
- Ti_Pkt *pkt, /* pointer to packet */\r
- PKTIO_METADATA_T *m, /* pointer to meta data */\r
- int * err)\r
-{\r
- return channel->_send((struct PKTIO_HANDLE_tag *)channel, pkt, m, err);\r
-}\r
-\r
-/*\r
-* @brief API sends data to a pktio channel\r
- *\r
- * @details This sends an array of @ref Ti_Pkt and associated meta data,\r
- * @ref PKTIO_METADATA_T to a channel. The channel\r
- * must have already been created via @ref pktio_create or opened\r
- * via @ref pktio_open. It may have\r
- * been created internally during the netapi intialization.\r
- * @param[in] @ref PKTIO_HANDLE_T: handle to the channel\r
- * @param[in] @ref Ti_Pkt*: pointer to the packet to send\r
- * @param[in] @ref PKTIO_METADATA_T: pointer to meta data associated with packet\r
- * @oaran[in[ int np: the number of packets in list to send\r
- * @param[out] int * err: pointer to error return\r
- * @retval int npkts: number of packets sent, 0 if error\r
- * @pre @ref netapi_init, @ref pktio_create, @ref pktio_open\r
- */\r
-int pktio_sendMulti(PKTIO_HANDLE_T *channel, /* the channel handle */\r
- Ti_Pkt * pkt[], /* array of packets to send */\r
- PKTIO_METADATA_T * m[], /* meta data array */\r
- int np, /* number of packets to send */\r
- int * err);\r
-\r
-/***********************************/\r
-/************* polling **************/\r
-/***********************************/\r
-\r
-/*\r
-* @brief API polls a pkto channel for received packets \r
- *\r
- * @details This api polls a pktio channel. Any pending data in the channel is\r
- * passed to the @ref PKTIO_CB registered when the channel was\r
- * created or opened. The channel must\r
- * have already been created via @ref pktio_create or opened\r
- * via @ref pktio_open. It may have\r
- * been created internally during the netapi intialization.\r
- * @param[in] @ref PKTIO_HANDLE_T: handle to the channel\r
- * @param[in] @ref PKTIO_POLL_T *: pointer to pktio poll configuration\r
- * @param[out] int * err: pointer to error return\r
- * @retval int npkts: number of packets received by poll \r
- * @pre @ref netapi_init, @ref pktio_create, @ref pktio_open\r
- */\r
-static inline int pktio_poll(PKTIO_HANDLE_T * handle, //handle to pktio\r
- PKTIO_POLL_T * p_poll_cfg, //polling configuration\r
- int * err)\r
-{\r
- handle->_poll((struct PKTIO_HANDLE_tag *) handle, p_poll_cfg, err);\r
-}\r
-\r
-/*\r
-* @brief API polls all pkto channels associarted with @ref NETAPI_T instance\r
- * for received packets\r
- *\r
- * @details This api polls all pktio channels attached to an instance.\r
- * Any pending data in these channels are \r
- * passed to the @ref PKTIO_CB registered when the channel was\r
- * created or opened. The channels must\r
- * have already been created via @ref pktio_create or opened\r
- * via @ref pktio_open. They may have\r
- * been created internally during the netapi intialization.\r
- * @param[in] @ref NETAPI_T: handle of the NETAPI instance \r
- * @param[in] @ref PKTIO_POLL_T *: pointer to pktio poll configuration\r
- * @param[out] int * err: pointer to error return\r
- * @retval int npkts: number of packets received by poll\r
- * @pre @ref netapi_init, @ref pktio_create, @ref pktio_open\r
- */\r
-int pktio_pollAll(NETAPI_T handle, PKTIO_POLL_T * p_poll_cfg, int *err);\r
-\r
-/*----------------- utilities------------------ */\r
-/* update max_n for poll */\r
-#define pktio_set_max_n(handle,max_n) (handle)->max_n=max_n;\r
-#define pktio_get_netapi_handle(handle) (handle)->back\r
-#define pktio_set_cookie(handle, cookie) (handle)->cookie = cookie\r
-#define pktio_get_cookie(handle) (handle)->cookie\r
-#define pktio_get_q(handle) (handle)->q\r
-\r
-/*-----------------Extra Fast Path pkt meta data macros--------------------*/\r
-#include "cppi_desc.h"\r
-#include "ti/drv/pa/pa.h"\r
-#include "ti/drv/pa/pasahost.h"\r
-\r
-\r
-//return default packet queue to poll for netcp RX\r
-//these are expensive calls, so call once and save\r
-static inline Qmss_QueueHnd PKTIO_GET_DEFAULT_NETCP_Q(PKTIO_HANDLE_T *h)\r
-{\r
-nwalGlobCxtInfo_t Info;\r
-nwal_getGlobCxtInfo(h->nwalInstanceHandle,&Info);\r
-return Info.rxDefPktQ;\r
-}\r
-\r
-//return L4Queue to poll for netcp RX (L4 classifier queue)\r
-//these are expensive calls, so call once and save\r
-static inline Qmss_QueueHnd PKTIO_GET_DEFAULT_NETCP_L4Q(PKTIO_HANDLE_T *h)\r
-{\r
-nwalLocCxtInfo_t Info;\r
-nwal_getLocCxtInfo(h->nwalInstanceHandle,&Info);\r
-return Info.rxL4PktQ;\r
-}\r
-\r
-\r
-/* find pointer to proto info fields in descriptor */\r
-static inline pasahoLongInfo_t* PKTIO_GET_PROTO_INFO( Ti_Pkt * pkt)\r
-{\r
-pasahoLongInfo_t* pinfo;\r
-uint32_t infoLen;\r
-Cppi_getPSData (Cppi_DescType_HOST, Cppi_PSLoc_PS_IN_DESC,(Cppi_Desc *)pkt, (uint8_t **)&pinfo, &infoLen);\r
-return pinfo;\r
-}\r
-\r
-/** "p" below is return of PKTIO_GET_PROTO_INFO() above**/\r
-\r
-/* offset to L3 header */\r
-#define PKTIO_GET_L3_OFFSET(p) PASAHO_LINFO_READ_L3_OFFSET(p)\r
-\r
-/* offset to L4 header */\r
-#define PKTIO_GET_L4_OFFSET(p) PASAHO_LINFO_READ_L4_OFFSET(p)\r
-\r
-/* next proto header */\r
-#define PKTIO_GET_NEXT_HEADER_TYPE(P) PASAHO_LINFO_READ_NXT_HDR_TYPE(p)\r
-\r
-/* offset to L4 payload */\r
-#define PKTIO_GET_L5_OFFSET(p) PASAHO_LINFO_READ_L5_OFFSET(p)\r
-\r
-/* end of L4 payload */\r
-#define PKTIO_GET_PAYLOAD_END(p) PASAHO_LINFO_READ_END_OFFSET(p)\r
-\r
-/* IPSEC ESP done ? */\r
-#define PKTIO_ESP_DONE(p) PASAHO_LINFO_IS_IPSEC_ESP(p)\r
-\r
-/* IPSEC ESP done ? */\r
-#define PKTIO_AH_DONE(p) PASAHO_LINFO_IS_IPSEC_AH(p)\r
-\r
-/* MAC info */\r
-#define PKTIO_IS_MAC_BROADCAST(p) PASAHO_READ_BITFIELD(p)\r
-#define PKTIO_IS_MAC_MULTICAST(p) PASAHO_READ_BITFIELD(p)\r
-#define PKTIO_GET_MAC_TYPE(p) PASAHO_LINFO_READ_MAC_PKTTYPE(p)\r
-\r
-/* read input port */\r
-#define PKTIO_GET_INPUT_PORT(p) PASAHO_LINFO_READ_INPORT(p) \r
-\r
-/* AppId */\r
-static inline unsigned int PKTIO_GET_APPID( Ti_Pkt * pkt) \r
-{\r
-unsigned char * p_swinfo0;\r
-Cppi_getSoftwareInfo (Cppi_DescType_HOST,\r
- (Cppi_Desc *)pkt,\r
- &p_swinfo0);\r
-return *((unsigned int *)p_swinfo0);\r
-} \r
-\r
-#endif\r
+/*********************************
+ *FILE: pktio.h
+ *PURPOSE: pktio library header
+ **************************************************************
+ * @file pktio.h
+ *
+ * @brief DESCRIPTION: pktio module main header file for user space transport
+ * library
+ *
+ * REVISION HISTORY: rev 0.0.1
+ *
+ * Copyright (c) Texas Instruments Incorporated 2010-2011
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************/
+#ifndef __PKTIO__H
+#define __PKTIO__H
+#include "netapi.h"
+#include "ti/runtime/pktlib/pktlib.h"
+#include "ti/drv/nwal/nwal.h"
+#include "ti/drv/nwal/nwal_util.h"
+#include "netapi_err.h"
+
+/*--------------------defines-----------------------*/
+#define PKTIO_NOMEM NETAPI_ERR_NOMEM
+//default pktio channels
+#define NETCP_TX "NETCP_TX"
+#define NETCP_RX "NETCP_RX"
+#define NETCP_SB_RX "NETCP_SB_RX"
+#define NETCP_SB_TX "NETCP_SB_TX"
+#define PKTIO_MAX_NAME 19
+
+/*--------------------data structures----------------*/
+typedef struct PKTIO_METADATA_Tag
+{
+ int flags1;
+#define PKTIO_META_RX 0x01
+#define PKTIO_META_TX 0x02
+#define PKTIO_META_SB_RX 0x4 //SB crypto rx
+#define PKTIO_META_SB_TX 0x8 //SB crypto tx
+#define PKTIO_META_APP_DEF 0x80000000
+ union
+ {
+ nwalRxPktInfo_t * rx_meta;
+ nwalTxPktInfo_t * tx_meta;
+ nwalDmRxPayloadInfo_t * rx_sb_meta;
+ nwalDmTxPayloadInfo_t * tx_sb_meta;
+ } u;
+ void * sa_handle; //valid for PKTIO_META_TX with IPSEC inflow or PKTIO_META_SB_TX.
+ // MUST BE nwal_HANDLE_INVALID otherwise
+} PKTIO_METADATA_T;
+
+/* the callback function */
+struct PKTIO_HANDLE_tag;
+
+//polling control
+typedef struct PKTIO_POLL_Tag
+{
+/* future */
+} PKTIO_POLL_T;
+
+#define PKTIO_MAX_RECV (TUNE_NETAPI_MAX_BURST_RCV)
+typedef void (*PKTIO_CB)(struct PKTIO_HANDLE_tag * channel, Ti_Pkt* p_recv[],
+ PKTIO_METADATA_T p_meta[], int n_pkts,
+ uint64_t ts );
+
+typedef int (*PKTIO_SEND)(struct PKTIO_HANDLE_tag * channel, Ti_Pkt* p_send,
+ PKTIO_METADATA_T *p_meta, int * p_err);
+
+typedef int (*PKTIO_POLL)(struct PKTIO_HANDLE_tag * channel,PKTIO_POLL_T * p_poll_cfg ,
+ int * p_err);
+
+/** channel configuration */
+#define PKTIO_NA 0
+typedef struct PKTIO_CFG_Tag
+{
+#define PKTIO_R 0x1
+#define PKTIO_W 0x2
+#define PKTIO_RW (PKTIO_R | PKTIO_W)
+int flags1;
+
+#define PKTIO_LOCAL 0x2
+#define PKTIO_GLOBAL 0x1
+#define PKTIO_PKT 0x4 //define this if this channel is for NETCP
+#define PKTIO_SB 0x8 //define this if this channel is for sideband crypto
+int flags2;
+
+//for create
+#define PKTIO_Q_ANY -1
+int qnum;
+
+//for poll
+int max_n;
+}PKTIO_CFG_T;
+
+struct NETAPI_tag;
+
+/* a pktio channel .. */
+
+typedef struct PKTIO_HANDLE_Tag
+{
+#define PKTIO_INUSE 0xfeedfeed
+ int inuse;
+ int use_nwal; /* true if this is managed by nwal */
+#define PKTIO_4_IPC 0 //For IPC
+#define PKTIO_4_ADJ_NWAL 1 //(RX)app queues managed by NWAL
+#define PKTIO_DEF_NWAL 2 // default NWAL RX/TX queues
+#define PKTIO_4_ADJ_SB 3 //(RX) crypto side band app defined
+#define PKTIO_DEF_SB 4 //crypto side band default
+ struct NETAPI_tag * back; /* back handle */
+ void * nwalInstanceHandle; /* save here for convenience */
+ PKTIO_CB cb; /* callback for channel */
+ PKTIO_CFG_T cfg; /* configuration */
+ Qmss_QueueHnd q; /* the associated queue handle */
+ Qmss_Queue qInfo; /* and its qm#/q# */
+ int max_n; /* max # of pkts to read in one poll */
+ void * cookie; /* app specific */
+ PKTIO_SEND _send; /* pktio type specific send function */
+ PKTIO_POLL _poll; /* pktio type specific POLL function */
+ char name[PKTIO_MAX_NAME+1];
+} PKTIO_HANDLE_T;
+
+
+
+typedef struct PKTIO_CONTROL_Tag
+{
+#define PKTIO_CLEAR 0x1 //clear out the channel
+#define PKTIO_DIVERT 0x2 //divert, (to dest channel)
+ int op;
+ PKTIO_HANDLE_T *dest;
+} PKTIO_CONTROL_T;
+
+
+
+/*---------------------------------------------------*/
+/*-------------------------API-----------------------*/
+/*---------------------------------------------------*/
+
+/*
+* @brief API creates a NETAPI channel
+ *
+ * @details This assigns global resources to a NETAPI pktio channel.
+ * Once created, the channel can be used to send and/or receive
+ *          a TI @ref Ti_Pkt. This can be used for communication with
+ *          the Network co-processor (NETCP) or for internal inter-processor
+ * communication. The channel is saved under the assigned name
+ *          and can be opened by other netapi thread instances.
+ * @param[in] @ref NETAPI_T: handle to the instance
+ * @param[in] char * name: a pointer to the char string name for channel
+ * @param[in] @ref PKTIO_CB : callback to be issued on packet receive
+ * @param[in] @ref PKTIO_CFG_T: pointer to channel configuration
+ * @param[out] int * err: pointer to error return
+ * @retval @ref PKTIO_HANDLE_T: handle to the pktio instance or NULL on error
+ * @pre @ref netapi_init
+ */
+PKTIO_HANDLE_T * pktio_create(NETAPI_T netapi_handle, /* netapi instance */
+ char * name, /* name of the channel */
+ PKTIO_CB cb, /* receive callback */
+ PKTIO_CFG_T * p_cfg, /* ptr to config*/
+ int * err);
+
+/*
+* @brief API opens an existing NETAPI channel
+ *
+ * @details This opens a NETAPI pktio channel for use. The channel
+ *          must have already been created via @ref pktio_create or may have
+ *          been created internally during the netapi initialization.
+ *          Once opened, the channel can be used to send and/or receive
+ *          a TI @ref Ti_Pkt. This can be used for communication with
+ *          the Network co-processor (NETCP) or for internal inter-processor
+ *          communication.
+ *
+ * @param[in] @ref NETAPI_T: handle to the instance
+ * @param[in] char * name: a pointer to the char string name for channel
+ * @param[in] @ref PKTIO_CB : callback to be issued on packet receive
+ * @param[in] @ref PKTIO_CFG_T: pointer to channel configuration
+ * @param[out] int * err: pointer to error return
+ * @retval @ref PKTIO_HANDLE_T: handle to the pktio instance or NULL on error
+ * @pre @ref netapi_init , @ref pktio_create
+ */
+PKTIO_HANDLE_T * pktio_open(NETAPI_T netapi_handle, /* netapi instance */
+ char *name, /* name of channel to open */
+ PKTIO_CB cb, /* receive callback */
+ PKTIO_CFG_T *p_cfg, /* channel configuration */
+ int * err);
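
/* Illustrative usage sketch (not part of the original header): one thread
 * creates a pktio channel bound to the NETCP packet path and another thread
 * opens it by name. The function names, the channel name "MY_APP_CHAN" and
 * the callback 'my_rx_cb' (of type PKTIO_CB) are hypothetical. */
static inline PKTIO_HANDLE_T *example_channel_create(NETAPI_T h, PKTIO_CB my_rx_cb, int *err)
{
    PKTIO_CFG_T cfg;

    memset(&cfg, 0, sizeof(cfg));
    cfg.flags1 = PKTIO_RW;                  /* read and write */
    cfg.flags2 = PKTIO_GLOBAL | PKTIO_PKT;  /* visible to other threads, NETCP packet channel */
    cfg.qnum   = PKTIO_Q_ANY;               /* let pktio pick the queue */
    cfg.max_n  = PKTIO_MAX_RECV;            /* burst size per poll */

    return pktio_create(h, "MY_APP_CHAN", my_rx_cb, &cfg, err);
}

static inline PKTIO_HANDLE_T *example_channel_open(NETAPI_T h, PKTIO_CB my_rx_cb, int *err)
{
    PKTIO_CFG_T cfg;

    memset(&cfg, 0, sizeof(cfg));
    cfg.flags1 = PKTIO_R;                   /* reader side only */
    cfg.flags2 = PKTIO_GLOBAL | PKTIO_PKT;
    return pktio_open(h, "MY_APP_CHAN", my_rx_cb, &cfg, err);
}
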
+
+/* future: control the channel */
+void pktio_control(PKTIO_HANDLE_T * channel, //handle from open or create
+ PKTIO_CB cb, //change the callback
+ PKTIO_CFG_T * p_cfg,//optional
+ PKTIO_CONTROL_T *p_control,//optional
+ int *err);
+
+/* future: close or delete a pktio channel */
+void pktio_close(PKTIO_HANDLE_T * channel, int * err);
+void pktio_delete(PKTIO_HANDLE_T * channel, int * err);
+
+/*
+* @brief API sends data to a pktio channel
+ *
+ * @details This sends a @ref Ti_Pkt and associated meta data,
+ * @ref PKTIO_METADATA_T to a channel. The channel
+ * must have already been created via @ref pktio_create or opened
+ * via @ref pktio_open. It may have
+ *          been created internally during the netapi initialization.
+ * @param[in] @ref PKTIO_HANDLE_T: handle to the channel
+ * @param[in] @ref Ti_Pkt*: pointer to the packet to send
+ * @param[in] @ref PKTIO_METADATA_T: pointer to meta data associated with packet
+ * @param[out] int * err: pointer to error return
+ * @retval int npkts: 1 if packet sent, 0 if error
+ * @pre @ref netapi_init, @ref pktio_create, @ref pktio_open
+ */
+static inline int pktio_send(PKTIO_HANDLE_T * channel, /* the channel */
+ Ti_Pkt *pkt, /* pointer to packet */
+ PKTIO_METADATA_T *m, /* pointer to meta data */
+ int * err)
+{
+ return channel->_send((struct PKTIO_HANDLE_tag *)channel, pkt, m, err);
+}
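
/* Illustrative usage sketch (not part of the original header): sending one
 * packet on a NETCP TX channel. The function name is hypothetical, the
 * nwalTxPktInfo_t contents are left to the application (zeroed here as a
 * placeholder), and sa_handle is set to nwal_HANDLE_INVALID because no IPSec
 * inflow is used, per the PKTIO_METADATA_T notes above. */
static inline int example_send_one(PKTIO_HANDLE_T *tx_chan, Ti_Pkt *pkt)
{
    int              err = 0;
    nwalTxPktInfo_t  tx_info;
    PKTIO_METADATA_T meta;

    memset(&tx_info, 0, sizeof(tx_info));   /* application fills ports/offsets as needed */
    memset(&meta, 0, sizeof(meta));
    meta.flags1    = PKTIO_META_TX;
    meta.u.tx_meta = &tx_info;
    meta.sa_handle = nwal_HANDLE_INVALID;   /* no IPSec inflow on this packet */

    return pktio_send(tx_chan, pkt, &meta, &err);  /* 1 if sent, 0 on error */
}
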
+
+/*
+* @brief API sends data to a pktio channel
+ *
+ * @details This sends an array of @ref Ti_Pkt and associated meta data,
+ * @ref PKTIO_METADATA_T to a channel. The channel
+ * must have already been created via @ref pktio_create or opened
+ * via @ref pktio_open. It may have
+ *          been created internally during the netapi initialization.
+ * @param[in] @ref PKTIO_HANDLE_T: handle to the channel
+ * @param[in] @ref Ti_Pkt*: pointer to the packet to send
+ * @param[in] @ref PKTIO_METADATA_T: pointer to meta data associated with packet
+ * @param[in]  int np:  the number of packets in the list to send
+ * @param[out] int * err: pointer to error return
+ * @retval int npkts: number of packets sent, 0 if error
+ * @pre @ref netapi_init, @ref pktio_create, @ref pktio_open
+ */
+int pktio_sendMulti(PKTIO_HANDLE_T *channel, /* the channel handle */
+ Ti_Pkt * pkt[], /* array of packets to send */
+ PKTIO_METADATA_T * m[], /* meta data array */
+ int np, /* number of packets to send */
+ int * err);
+
+/***********************************/
+/************* polling **************/
+/***********************************/
+
+/*
+* @brief API polls a pktio channel for received packets
+ *
+ * @details This api polls a pktio channel. Any pending data in the channel is
+ * passed to the @ref PKTIO_CB registered when the channel was
+ * created or opened. The channel must
+ * have already been created via @ref pktio_create or opened
+ * via @ref pktio_open. It may have
+ *          been created internally during the netapi initialization.
+ * @param[in] @ref PKTIO_HANDLE_T: handle to the channel
+ * @param[in] @ref PKTIO_POLL_T *: pointer to pktio poll configuration
+ * @param[out] int * err: pointer to error return
+ * @retval int npkts: number of packets received by poll
+ * @pre @ref netapi_init, @ref pktio_create, @ref pktio_open
+ */
+static inline int pktio_poll(PKTIO_HANDLE_T * handle, //handle to pktio
+ PKTIO_POLL_T * p_poll_cfg, //polling configuration
+ int * err)
+{
+    return handle->_poll((struct PKTIO_HANDLE_tag *) handle, p_poll_cfg, err);
+}
+
+/*
+* @brief API polls all pktio channels associated with a @ref NETAPI_T instance
+ * for received packets
+ *
+ * @details This api polls all pktio channels attached to an instance.
+ *          Any pending data in these channels is
+ * passed to the @ref PKTIO_CB registered when the channel was
+ * created or opened. The channels must
+ * have already been created via @ref pktio_create or opened
+ * via @ref pktio_open. They may have
+ *          been created internally during the netapi initialization.
+ * @param[in] @ref NETAPI_T: handle of the NETAPI instance
+ * @param[in] @ref PKTIO_POLL_T *: pointer to pktio poll configuration
+ * @param[out] int * err: pointer to error return
+ * @retval int npkts: number of packets received by poll
+ * @pre @ref netapi_init, @ref pktio_create, @ref pktio_open
+ */
+int pktio_pollAll(NETAPI_T handle, PKTIO_POLL_T * p_poll_cfg, int *err);
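For illustration only: a hedged sketch of a receive loop driven by pktio_pollAll(); the NULL poll configuration and the rx_loop name are assumptions of the sketch. Received packets are handed to the PKTIO_CB supplied at channel create/open time.

    /* sketch: drain every pktio channel of the instance until an error is reported */
    void rx_loop(NETAPI_T netapi_handle)
    {
        int err = 0;
        for (;;)
        {
            int npkts = pktio_pollAll(netapi_handle, NULL, &err);  /* NULL: default poll configuration (assumed) */
            if (err) break;
            if (npkts == 0)
            {
                /* nothing pending: yield, sleep, or do other application work here */
            }
        }
    }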
+
+/*----------------- utilities------------------ */
+/* update max_n for poll */
+#define pktio_set_max_n(handle, maxn) ((handle)->max_n = (maxn))
+#define pktio_get_netapi_handle(handle) (handle)->back
+#define pktio_set_cookie(handle, cookie) (handle)->cookie = cookie
+#define pktio_get_cookie(handle) (handle)->cookie
+#define pktio_get_q(handle) (handle)->q
+
+/*-----------------Extra Fast Path pkt meta data macros--------------------*/
+#include "cppi_desc.h"
+#include "ti/drv/pa/pa.h"
+#include "ti/drv/pa/pasahost.h"
+
+
+//return default packet queue to poll for netcp RX
+//these are expensive calls, so call once and save
+static inline Qmss_QueueHnd PKTIO_GET_DEFAULT_NETCP_Q(PKTIO_HANDLE_T *h)
+{
+nwalGlobCxtInfo_t Info;
+nwal_getGlobCxtInfo(h->nwalInstanceHandle,&Info);
+return Info.rxDefPktQ;
+}
+
+//return L4Queue to poll for netcp RX (L4 classifier queue)
+//these are expensive calls, so call once and save
+static inline Qmss_QueueHnd PKTIO_GET_DEFAULT_NETCP_L4Q(PKTIO_HANDLE_T *h)
+{
+nwalLocCxtInfo_t Info;
+nwal_getLocCxtInfo(h->nwalInstanceHandle,&Info);
+return Info.rxL4PktQ;
+}
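Because both getters go through NWAL, a common pattern (sketched below with assumed names) is to look the queues up once at start-up and cache the handles.

    /* sketch: look the NETCP receive queues up once and reuse the cached handles */
    static Qmss_QueueHnd netcp_def_rx_q;
    static Qmss_QueueHnd netcp_l4_rx_q;

    static void cache_netcp_rx_queues(PKTIO_HANDLE_T *rx_chan)    /* rx_chan: an open NETCP rx channel (assumed) */
    {
        netcp_def_rx_q = PKTIO_GET_DEFAULT_NETCP_Q(rx_chan);      /* default (classifier miss) packet queue */
        netcp_l4_rx_q  = PKTIO_GET_DEFAULT_NETCP_L4Q(rx_chan);    /* L4 classifier queue */
    }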
+
+
+
+static inline void PKTIO_QMSS_QUEUE_PUSH_DESC_SIZE_RAW(Qmss_QueueHnd hnd, void *descAddr, uint32_t descSize)
+{
+ /* TBD: Will be moved to QMSS Module */
+ qmssLObj.qmQueMgmtDataReg->QUEUE_MGMT_GROUP[hnd].QUEUE_REG_D = ((uint32_t) descAddr | ((descSize >> 4) - 1));
+ return;
+}
+
+static inline void* PKTIO_QMSS_QUEUE_POP_RAW(Qmss_QueueHnd hnd)
+{
+ /* TBD: Will be moved to QMSS Module */
+ return (void *) (qmssLObj.qmQueMgmtReg->QUEUE_MGMT_GROUP[hnd].QUEUE_REG_D);
+}
+
+//Return NWAL Global Instance
+static inline nwal_Inst PKTIO_GET_NWAL_INSTANCE(PKTIO_HANDLE_T *h)
+{
+return h->nwalInstanceHandle;
+}
+/* find pointer to proto info fields in descriptor */
+static inline pasahoLongInfo_t* PKTIO_GET_PROTO_INFO( Ti_Pkt * pkt)
+{
+pasahoLongInfo_t* pinfo;
+uint32_t infoLen;
+Cppi_getPSData (Cppi_DescType_HOST, Cppi_PSLoc_PS_IN_DESC,(Cppi_Desc *)pkt, (uint8_t **)&pinfo, &infoLen);
+return pinfo;
+}
+
+/** "p" below is return of PKTIO_GET_PROTO_INFO() above**/
+
+/* offset to L3 header */
+#define PKTIO_GET_L3_OFFSET(p) PASAHO_LINFO_READ_L3_OFFSET(p)
+
+/* offset to L4 header */
+#define PKTIO_GET_L4_OFFSET(p) PASAHO_LINFO_READ_L4_OFFSET(p)
+
+/* next proto header */
+#define PKTIO_GET_NEXT_HEADER_TYPE(p) PASAHO_LINFO_READ_NXT_HDR_TYPE(p)
+
+/* offset to L4 payload */
+#define PKTIO_GET_L5_OFFSET(p) PASAHO_LINFO_READ_L5_OFFSET(p)
+
+/* end of L4 payload */
+#define PKTIO_GET_PAYLOAD_END(p) PASAHO_LINFO_READ_END_OFFSET(p)
+
+/* IPSEC ESP done ? */
+#define PKTIO_ESP_DONE(p) PASAHO_LINFO_IS_IPSEC_ESP(p)
+
+/* IPSEC AH done ? */
+#define PKTIO_AH_DONE(p) PASAHO_LINFO_IS_IPSEC_AH(p)
+
+/* MAC info */
+#define PKTIO_IS_MAC_BROADCAST(p) PASAHO_READ_BITFIELD(p)
+#define PKTIO_IS_MAC_MULTICAST(p) PASAHO_READ_BITFIELD(p)
+#define PKTIO_GET_MAC_TYPE(p) PASAHO_LINFO_READ_MAC_PKTTYPE(p)
+
+/* read input port */
+#define PKTIO_GET_INPUT_PORT(p) PASAHO_LINFO_READ_INPORT(p)
+
+/* AppId */
+static inline unsigned int PKTIO_GET_APPID( Ti_Pkt * pkt)
+{
+unsigned char * p_swinfo0;
+Cppi_getSoftwareInfo (Cppi_DescType_HOST,
+ (Cppi_Desc *)pkt,
+ &p_swinfo0);
+return *((unsigned int *)p_swinfo0);
+}
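A hedged sketch of how these accessors might be combined on a packet delivered by NETCP; pkt is assumed to be a Ti_Pkt handed to a PKTIO_CB, and the offsets are treated as byte offsets from the start of the packet data.

    /* sketch: locate the L4 payload and the application id of a received packet */
    pasahoLongInfo_t *pinfo   = PKTIO_GET_PROTO_INFO(pkt);
    unsigned int      l4_off  = PKTIO_GET_L4_OFFSET(pinfo);    /* start of the L4 (e.g. UDP) header     */
    unsigned int      l5_off  = PKTIO_GET_L5_OFFSET(pinfo);    /* start of the L4 payload               */
    unsigned int      end_off = PKTIO_GET_PAYLOAD_END(pinfo);  /* end of the L4 payload                 */
    unsigned int      app_id  = PKTIO_GET_APPID(pkt);          /* swinfo0 word set by the matching rule */
    /* the payload would then be the (end_off - l5_off) bytes starting at data + l5_off,
       where data is the packet's linked buffer pointer (e.g. from Pktlib_getDataBuffer) */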
+
+#endif
index fe785af69fd0ce3f094b7858ad2b09186cd67fa5..bebb9201b028c11543ce9ea2b1e457012a4ce73e 100755 (executable)
-/*******************************\r
- * FILE: netapi.c\r
- * Purpose: implementation of netapi startup/shutdown\r
- **************************************************************\r
- * FILE: netapi.c\r
- * \r
- * DESCRIPTION: netapi main source file for user space transport\r
- * library\r
- * \r
- * REVISION HISTORY: rev 0.0.1 \r
- *\r
- * Copyright (c) Texas Instruments Incorporated 2010-2011\r
- * \r
- * Redistribution and use in source and binary forms, with or without \r
- * modification, are permitted provided that the following conditions \r
- * are met:\r
- *\r
- * Redistributions of source code must retain the above copyright \r
- * notice, this list of conditions and the following disclaimer.\r
- *\r
- * Redistributions in binary form must reproduce the above copyright\r
- * notice, this list of conditions and the following disclaimer in the \r
- * documentation and/or other materials provided with the \r
- * distribution.\r
- *\r
- * Neither the name of Texas Instruments Incorporated nor the names of\r
- * its contributors may be used to endorse or promote products derived\r
- * from this software without specific prior written permission.\r
- *\r
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \r
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT \r
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT \r
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, \r
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT \r
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \r
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \r
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
-\r
- * *****************************/\r
-#include <stdio.h>\r
-#include <stdlib.h>\r
-#include <unistd.h>\r
-#include <string.h>\r
-#include "netapi.h"\r
-\r
-typedef struct PRESET_Tag\r
-{\r
-#define KMAXQ 10\r
- int kqinuse[KMAXQ];\r
- //more..\r
-} PRESET_T;\r
-\r
-/*------------internal prototypes---------------*/\r
-static uint8_t* netapiSharedMemoryMalloc(uint32_t size);\r
-static void netapiSharedMemoryFree(uint8_t* ptr);\r
-static int system_init(NETAPI_HANDLE_T *);\r
-static void get_presets(PRESET_T * p_preset);\r
-static void netapi_cleanup_at_start(void);\r
-\r
-\r
-/*------------globals-----------------*/\r
-#define NUM_HOST_DESC (TUNE_NETAPI_NUM_LOCAL_DESC)\r
-#define SIZE_LOCAL_DESC (TUNE_NETAPI_DESC_SIZE) \r
-#define SIZE_SHARED_DESC (TUNE_NETAPI_DESC_SIZE)\r
-\r
-#define CONFIG_BUFSIZE_PA_INST 256\r
-#define CONFIG_BUFSIZE_L2_TABLE 1000\r
-#define CONFIG_BUFSIZE_L3_TABLE 4000\r
-\r
-static NETAPI_CFG_T netapi_default_cfg=\r
-{\r
-TUNE_NETAPI_PERM_MEM_SZ,\r
-0, //start of packet offset for hw to place data on rx for default flow\r
-TUNE_NETAPI_QM_CONFIG_MAX_DESC_NUM, //max number of descriptors in system\r
-TUNE_NETAPI_NUM_GLOBAL_DESC, //total we will use\r
-TUNE_NETAPI_DEFAULT_NUM_BUFFERS, //#descriptors+buffers in default heap\r
-TUNE_NETAPI_DEFAULT_NUM_SOLO_DESCRIPTORS, //#descriptors w/o buffers in default heap\r
-TUNE_NETAPI_DEFAULT_BUFFER_SIZE //size of buffers in default heap\r
-\r
-};\r
-\r
-static Pktlib_HeapIfTable netapi_pktlib_ifTable;\r
-static NETAPI_GLOBAL_T netapi_global;\r
-NETAPI_GLOBAL_T * netapi_get_global(){ return &netapi_global;}\r
-\r
-/* utility API for NETAPI user to get pktlib if table to use if he creates his own heap */\r
-Pktlib_HeapIfTable *netapi_getPktlibIfTable(void) {return &netapi_pktlib_ifTable;}\r
-\r
-//zap a queue\r
-void netapi_zapQ(int queueNum);\r
-/*-------------------------------------\r
- * initialize NETAPI instance \r
- *-------------------------------------*/\r
-NETAPI_T netapi_init(int master, NETAPI_CFG_T * p_cfg)\r
-{\r
- int i;\r
- int err;\r
- NETAPI_HANDLE_T * p = (NETAPI_HANDLE_T *) calloc(1,sizeof(NETAPI_HANDLE_T));\r
- if (!p) return NULL;\r
- p->master = master;\r
-\r
- /* create space for our local pktios */\r
- for(i=0;i<NETAPI_MAX_PKTIO; i++)\r
- {\r
- p->pktios[i] = calloc(1,sizeof(PKTIO_HANDLE_T));\r
- if (!p->pktios[i]) return NULL;\r
- }\r
-\r
-#ifdef NETAPI_INCLUDE_SCHED\r
- /* create space for scheduler */\r
- p->p_sched = calloc(1,sizeof(NETAPI_SCHED_HANDLE_T));\r
-#endif \r
-\r
-\r
- /* global stuff (if master) */\r
- if (master==NETAPI_SYS_MASTER)\r
- {\r
- if (p_cfg) memcpy(&netapi_global.cfg,p_cfg, sizeof(NETAPI_CFG_T));\r
- else memcpy(&netapi_global.cfg,&netapi_default_cfg, sizeof(NETAPI_CFG_T));\r
- for(i=0;i<NETAPI_MAX_PKTIO;i++) \r
- { \r
- netapi_global.pktios[i].qn.qNum=-1;\r
- netapi_global.pktios[i].name[0]='\0';\r
- }\r
- } \r
- //this goes to shared memory eventually\r
- p->global = (void *) &netapi_global;\r
-\r
-\r
- /* system init */\r
- if(master==NETAPI_SYS_MASTER)\r
- {\r
- err = system_init(p);\r
- if (err<0) \r
- {\r
- //todo: cleanup\r
- return NULL;\r
- }\r
- /* create pktio channels for tx,rx */\r
- }\r
- else\r
- {\r
- /*todo init for non-system cores/threads */\r
- /* qm_start, */\r
- /* attach to heaps */\r
- /* nwal_start */\r
- }\r
- \r
- return (NETAPI_T) p;\r
-}\r
-\r
-/*-------------------------------\r
- * Shut down netapi instance\r
- *-------------------------------*/\r
-void netapi_shutdown(NETAPI_T h)\r
-{\r
- int i;\r
- NETAPI_HANDLE_T * p = (NETAPI_HANDLE_T *) h;\r
- if (!p) return;\r
-\r
- printf(">netapi: WARNING shutdown may not be fully implemented\n");\r
- if (p->master)\r
- {\r
- /* close nwal */\r
- nwal_delete(netapi_global.nwal_context.nwalInstHandle);\r
-\r
- /* close heaps */\r
- netapi_closeHeap(h, p->netcp_heap);\r
- netapi_closeHeap(h, p->netcp_control_heap);\r
- netapi_closeHeap(h, netapi_get_global()->nwal_context.pa2sa_heap); \r
- netapi_closeHeap(h, netapi_get_global()->nwal_context.sa2pa_heap); \r
- \r
- //loop over registered heaps\r
- for(i=0;i<TUNE_NETAPI_MAX_HEAPS;i++)\r
- {\r
- if (p->createdHeaps[i]) {netapi_closeHeap(h,p->createdHeaps[i]);p->createdHeaps[i]=NULL;}\r
- }\r
- netapi_cleanup_at_start(); //clear 1st 50 not-specified queues..\r
- netapi_VM_memory_teardown();\r
- } \r
- free(p);\r
- return;\r
-}\r
-\r
-//exception crash\r
-void netapi_err_teardown() { netapi_cleanup_at_start(); exit(-99); }\r
-\r
-/*-------------------utilities-------------------*/\r
-static uint8_t* netapiSharedMemoryMalloc(uint32_t size)\r
-{\r
-return (uint8_t *)netapi_VM_memAlloc(size, 128); \r
-}\r
-\r
-static void netapiSharedMemoryFree(uint8_t* ptr)\r
-{\r
- /* Do Nothing. */\r
- printf(">netapi Unexpected. need to provide a free () for some reason!! \n");\r
- return;\r
-}\r
-\r
-// initialization\r
-static int system_init(NETAPI_HANDLE_T * handle) \r
-{\r
- int32_t result;\r
- Pktlib_HeapHandle sharedHeapHandle;\r
- Pktlib_HeapHandle controlHeapHandle;\r
-\r
- /* initialize all the memory we are going to use\r
- - chunk for buffers, descriptors\r
- - memory mapped peripherals we use, such as QMSS, PA, etc */\r
- result= netapi_VM_memory_setup();\r
- if (result) printf(">netapi: system init - memory set up OK\n");\r
- else {printf(">netap: system init - memory set up failed\n"); return -1;}\r
-\r
- //get timer running\r
- netapi_init_timer();\r
-\r
- /* Initialize Queue Manager Sub System */\r
- result = netapi_init_qm (netapi_global.cfg.def_max_descriptors); \r
- if (result != 1)\r
- {\r
- return -1;\r
- }\r
-\r
- /* Start the QMSS. */\r
- if (netapi_start_qm() != 1)\r
- {\r
- return -1;\r
- }\r
-\r
- //clean our old junk in 1st bunch of queues that will be allocated to us\r
- netapi_cleanup_at_start();\r
-\r
- /* Initialize the global descriptor memory region. */\r
- result= netapi_qm_setup_mem_region( \r
- netapi_global.cfg.def_tot_descriptors_for_us,\r
- SIZE_SHARED_DESC,\r
- (unsigned int *) netapi_VM_QMemGlobalDescRam,\r
- NETAPI_GLOBAL_REGION);\r
- if(result <0) {printf(">netapi; can't setup QM shared region\n"); return -1;}\r
-\r
-#if 0 //todo setup 2nd region\r
-/* Initialize the local memory region configuration. */\r
- result= netapi_qm_setup_mem_region( \r
- NUM_HOST_DESC,\r
- SIZE_LOCAL_DESC,\r
- netapi_VM_QMemLocalDescRam,\r
- NETAPI_LOCAL_REGION);\r
- if(result <0) {printf("can't setup local region\n"); return -1;}\r
-#endif\r
- /* Initialize CPPI CPDMA */\r
-\r
- result = netapi_init_cppi ();\r
- if (result != 1)\r
- {\r
- printf (">netapi: Error initializing CPPI SubSystem error code : %d\n",result);\r
- return -1;\r
- }\r
-\r
- /* CPPI and Queue Manager are initialized. */\r
- printf (">netapi: Queue Manager and CPPI are initialized.\n");\r
-\r
- /* create main pkt heap */\r
- /* Initialize the Shared Heaps. */\r
- Pktlib_sharedHeapInit();\r
-\r
- /* Populate the heap interface table. */\r
- netapi_pktlib_ifTable.data_malloc = netapiSharedMemoryMalloc;\r
- netapi_pktlib_ifTable.data_free = netapiSharedMemoryFree;\r
-\r
- /* Create Shared Heap with specified configuration. */\r
- sharedHeapHandle = Pktlib_createHeap("netapi", NETAPI_GLOBAL_REGION, //was 0\r
- 1,\r
- netapi_global.cfg.def_heap_buf_size,\r
- netapi_global.cfg.def_heap_n_descriptors,\r
- netapi_global.cfg.def_heap_n_zdescriptors,\r
- &netapi_pktlib_ifTable);\r
- //todo -> cleanup on failure\r
- if (!sharedHeapHandle) { printf(">'netapi' heap create failed\n"); return -1;}\r
- handle->netcp_heap= sharedHeapHandle;\r
-\r
- controlHeapHandle = Pktlib_createHeap("netapi_control", NETAPI_GLOBAL_REGION,\r
- 1,\r
- TUNE_NETAPI_CONFIG_MAX_CTL_RXTX_BUF_SIZE,\r
- TUNE_NETAPI_CONFIG_NUM_CTL_BUF,\r
- 0,\r
- &netapi_pktlib_ifTable);\r
- //todo -> cleanup on failure\r
- if (!controlHeapHandle) { printf(">netapi -'netapicontrol' heap create failed\n"); return -1;}\r
- handle->netcp_control_heap= controlHeapHandle;\r
-\r
-\r
- /* now NWAL */\r
- result = netapi_init_nwal(\r
- NETAPI_GLOBAL_REGION,\r
- &netapi_pktlib_ifTable, \r
- &netapi_global.nwal_context,\r
- &netapi_global.cfg);\r
- if (result<0) {printf(">netapi init_nwal() failed\n"); return -1; }\r
-\r
- /* start NWAL */\r
- result = netapi_start_nwal(sharedHeapHandle, \r
- controlHeapHandle,\r
- &handle->nwal_local,\r
- &netapi_global.nwal_context);\r
- if (result<0) {printf(">netapi start_nwal() failed\n"); return -1; }\r
- //** success **\r
-\r
-\r
- return 0;\r
-\r
-}\r
-\r
-\r
-/*---------------\r
- * get presets()\r
- *---------------*/\r
-static void get_presets(PRESET_T * p_preset)\r
-{\r
- /* read from kernel or overall config area */\r
- /* for now hard code what kernel did */\r
-}\r
-\r
-\r
-/*************************************************************\r
- ******************MISC INTERNAL******************************\r
-**************************************************************/\r
-/* poll the garbage queues of all registered heaps */\r
-void netapi_pollHeapGarbage(NETAPI_T h)\r
-{\r
-int i;\r
- NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;\r
- Pktlib_garbageCollection(n->netcp_heap);\r
- //no need to do garbage collection on other internal heaps\r
- for(i=0;i<TUNE_NETAPI_MAX_HEAPS;i++)\r
- {\r
- if (n->createdHeaps[i]) Pktlib_garbageCollection(n->createdHeaps[i]);\r
- }\r
-}\r
-\r
-/* poll NETCP control queue for responses */\r
-void netapi_netcpPoll(NETAPI_T p)\r
-{\r
- NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) p;\r
- nwal_pollCtl( ((NETAPI_GLOBAL_T *) (n->global))->nwal_context.nwalInstHandle,NULL,NULL);\r
-}\r
-\r
-/****************************************************************\r
- *****************Cleanup Functions******************************\r
-*****************************************************************/\r
-\r
-//clean up function for linux user space\r
-void netapi_zapQ(int queueNum)\r
-{\r
-char * descPtr;\r
-int i;\r
-if (!queueNum) return;\r
-for (i=0;;i+=1 )\r
- {\r
- /* Pop descriptor from source queue */\r
- if ((descPtr = (char *)Qmss_queuePop (queueNum)) == NULL)\r
- {\r
- break;\r
- }\r
- else {/*printf("netapi qzap in play\n");*/}\r
- }\r
- if(i) printf(">netapi: @recovery - %d descriptors cleaned from qn %d\n",i, queueNum);\r
-}\r
-\r
-//defensive: clean out stuff hanging around\r
-//\r
-// open a bunch of free queues and zap them\r
-#define NQUEUES2CLEAR 15\r
-static Qmss_QueueHnd tempQH[NQUEUES2CLEAR];\r
-static void netapi_cleanup_at_start(void)\r
-{\r
-int i;\r
-uint8_t isAllocated;\r
-\r
-for(i=0;i<NQUEUES2CLEAR;i++) \r
-{\r
- tempQH[i] = Qmss_queueOpen(Qmss_QueueType_GENERAL_PURPOSE_QUEUE,\r
- QMSS_PARAM_NOT_SPECIFIED, &isAllocated);\r
- netapi_zapQ(tempQH[i]);\r
-}\r
-\r
-for(i=0;i<NQUEUES2CLEAR;i++)\r
-{\r
- Qmss_queueClose(tempQH[i]);\r
-}\r
-\r
-}\r
-\r
-/********************************\r
- * clean up a pktlib heap\r
- ***********************************/\r
-int netapi_closeHeap(NETAPI_T h, Pktlib_HeapHandle p)\r
-{\r
-Qmss_QueueHnd q;\r
-Pktlib_garbageCollection(p); \r
-q = Pktlib_getZeroHeapQueue(p);\r
-netapi_zapQ(q);\r
-q= Pktlib_getInternalHeapQueue(p);\r
-netapi_zapQ(q);\r
-}\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
+/*******************************
+ * FILE: netapi.c
+ * Purpose: implementation of netapi startup/shutdown
+ **************************************************************
+ * FILE: netapi.c
+ *
+ * DESCRIPTION: netapi main source file for user space transport
+ * library
+ *
+ * REVISION HISTORY: rev 0.0.1
+ *
+ * Copyright (c) Texas Instruments Incorporated 2010-2011
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ * *****************************/
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include "netapi.h"
+
+typedef struct PRESET_Tag
+{
+#define KMAXQ 10
+ int kqinuse[KMAXQ];
+ //more..
+} PRESET_T;
+
+/*------------internal prototypes---------------*/
+static uint8_t* netapiSharedMemoryMalloc(uint32_t size);
+static void netapiSharedMemoryFree(uint8_t* ptr);
+static int system_init(NETAPI_HANDLE_T *);
+static void get_presets(PRESET_T * p_preset);
+static void netapi_cleanup_at_start(void);
+
+
+/*------------globals-----------------*/
+#define NUM_HOST_DESC (TUNE_NETAPI_NUM_LOCAL_DESC)
+#define SIZE_LOCAL_DESC (TUNE_NETAPI_DESC_SIZE)
+#define SIZE_SHARED_DESC (TUNE_NETAPI_DESC_SIZE)
+
+#define CONFIG_BUFSIZE_PA_INST 256
+#define CONFIG_BUFSIZE_L2_TABLE 1000
+#define CONFIG_BUFSIZE_L3_TABLE 4000
+
+static NETAPI_CFG_T netapi_default_cfg=
+{
+TUNE_NETAPI_PERM_MEM_SZ,
+0, //start of packet offset for hw to place data on rx for default flow
+TUNE_NETAPI_QM_CONFIG_MAX_DESC_NUM, //max number of descriptors in system
+TUNE_NETAPI_NUM_GLOBAL_DESC, //total we will use
+TUNE_NETAPI_DEFAULT_NUM_BUFFERS, //#descriptors+buffers in default heap
+TUNE_NETAPI_DEFAULT_NUM_SOLO_DESCRIPTORS, //#descriptors w/o buffers in default heap
+TUNE_NETAPI_DEFAULT_BUFFER_SIZE //size of buffers in default heap
+
+};
+
+static Pktlib_HeapIfTable netapi_pktlib_ifTable;
+static NETAPI_GLOBAL_T netapi_global;
+NETAPI_GLOBAL_T * netapi_get_global(){ return &netapi_global;}
+
+/* utility API for NETAPI user to get pktlib if table to use if he creates his own heap */
+Pktlib_HeapIfTable *netapi_getPktlibIfTable(void) {return &netapi_pktlib_ifTable;}
+
+//zap a queue
+void netapi_zapQ(int queueNum);
+/*-------------------------------------
+ * initialize NETAPI instance
+ *-------------------------------------*/
+NETAPI_T netapi_init(int master, NETAPI_CFG_T * p_cfg)
+{
+ int i;
+ int err;
+ NETAPI_HANDLE_T * p = (NETAPI_HANDLE_T *) calloc(1,sizeof(NETAPI_HANDLE_T));
+ if (!p) return NULL;
+ p->master = master;
+
+ /* create space for our local pktios */
+ for(i=0;i<NETAPI_MAX_PKTIO; i++)
+ {
+ p->pktios[i] = calloc(1,sizeof(PKTIO_HANDLE_T));
+ if (!p->pktios[i]) return NULL;
+ }
+
+#ifdef NETAPI_INCLUDE_SCHED
+ /* create space for scheduler */
+ p->p_sched = calloc(1,sizeof(NETAPI_SCHED_HANDLE_T));
+#endif
+
+
+ /* global stuff (if master) */
+ if (master==NETAPI_SYS_MASTER)
+ {
+ if (p_cfg) memcpy(&netapi_global.cfg,p_cfg, sizeof(NETAPI_CFG_T));
+ else memcpy(&netapi_global.cfg,&netapi_default_cfg, sizeof(NETAPI_CFG_T));
+ for(i=0;i<NETAPI_MAX_PKTIO;i++)
+ {
+ netapi_global.pktios[i].qn.qNum=-1;
+ netapi_global.pktios[i].name[0]='\0';
+ }
+ }
+ //this goes to shared memory eventually
+ p->global = (void *) &netapi_global;
+
+
+ /* system init */
+ if(master==NETAPI_SYS_MASTER)
+ {
+ err = system_init(p);
+ if (err<0)
+ {
+ //todo: cleanup
+ return NULL;
+ }
+ /* create pktio channels for tx,rx */
+ }
+ else
+ {
+ /*todo init for non-system cores/threads */
+ /* qm_start, */
+ /* attach to heaps */
+ /* nwal_start */
+ }
+
+ return (NETAPI_T) p;
+}
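For reference, a hedged sketch of how a system-master process might bring the library up with the built-in defaults and tear it down again; bring_up_transport is an illustrative name and error handling is kept minimal.

    /* sketch: system-master initialization with the default configuration */
    int bring_up_transport(void)
    {
        NETAPI_T h = netapi_init(NETAPI_SYS_MASTER, NULL);   /* NULL selects netapi_default_cfg */
        if (h == NULL)
        {
            printf("netapi_init failed\n");
            return -1;
        }
        /* ... create pktio channels, configure NETCP, run the application event loop ... */
        netapi_shutdown(h);
        return 0;
    }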
+
+/*-------------------------------
+ * Shut down netapi instance
+ *-------------------------------*/
+void netapi_shutdown(NETAPI_T h)
+{
+ int i;
+ NETAPI_HANDLE_T * p = (NETAPI_HANDLE_T *) h;
+ if (!p) return;
+
+ printf(">netapi: WARNING shutdown may not be fully implemented\n");
+ if (p->master)
+ {
+ /* close nwal */
+ nwal_delete(netapi_global.nwal_context.nwalInstHandle);
+
+ /* close heaps */
+ netapi_closeHeap(h, p->netcp_heap);
+ netapi_closeHeap(h, p->netcp_control_heap);
+ netapi_closeHeap(h, netapi_get_global()->nwal_context.pa2sa_heap);
+ netapi_closeHeap(h, netapi_get_global()->nwal_context.sa2pa_heap);
+
+ //loop over registered heaps
+ for(i=0;i<TUNE_NETAPI_MAX_HEAPS;i++)
+ {
+ if (p->createdHeaps[i]) {netapi_closeHeap(h,p->createdHeaps[i]);p->createdHeaps[i]=NULL;}
+ }
+ netapi_cleanup_at_start(); //clear the first NQUEUES2CLEAR not-specified queues
+ netapi_VM_memory_teardown();
+ }
+ free(p);
+ return;
+}
+
+//exception crash
+void netapi_err_teardown() { netapi_cleanup_at_start(); exit(-99); }
+
+/*-------------------utilities-------------------*/
+static uint8_t* netapiSharedMemoryMalloc(uint32_t size)
+{
+return (uint8_t *)netapi_VM_memAlloc(size, 128);
+}
+
+static void netapiSharedMemoryFree(uint8_t* ptr)
+{
+ /* Do Nothing. */
+ printf(">netapi Unexpected. need to provide a free () for some reason!! \n");
+ return;
+}
+
+// initialization
+static int system_init(NETAPI_HANDLE_T * handle)
+{
+ int32_t result;
+ Pktlib_HeapHandle sharedHeapHandle;
+ Pktlib_HeapHandle controlHeapHandle;
+ Pktlib_HeapCfg heapCfg;
+ int32_t errCode;
+
+ /* initialize all the memory we are going to use
+ - chunk for buffers, descriptors
+ - memory mapped peripherals we use, such as QMSS, PA, etc */
+ result= netapi_VM_memory_setup();
+ if (result) printf(">netapi: system init - memory set up OK\n");
+ else {printf(">netap: system init - memory set up failed\n"); return -1;}
+
+ //get timer running
+ netapi_init_timer();
+
+ /* Initialize Queue Manager Sub System */
+ result = netapi_init_qm (netapi_global.cfg.def_max_descriptors);
+ if (result != 1)
+ {
+ return -1;
+ }
+
+ /* Start the QMSS. */
+ if (netapi_start_qm() != 1)
+ {
+ return -1;
+ }
+
+ //clean out stale descriptors from the first set of queues that will be allocated to us
+ netapi_cleanup_at_start();
+
+ /* Initialize the global descriptor memory region. */
+ result= netapi_qm_setup_mem_region(
+ netapi_global.cfg.def_tot_descriptors_for_us,
+ SIZE_SHARED_DESC,
+ (unsigned int *) netapi_VM_QMemGlobalDescRam,
+ NETAPI_GLOBAL_REGION);
+ if(result <0) {printf(">netapi; can't setup QM shared region\n"); return -1;}
+
+#if 0 //todo setup 2nd region
+/* Initialize the local memory region configuration. */
+ result= netapi_qm_setup_mem_region(
+ NUM_HOST_DESC,
+ SIZE_LOCAL_DESC,
+ netapi_VM_QMemLocalDescRam,
+ NETAPI_LOCAL_REGION);
+ if(result <0) {printf("can't setup local region\n"); return -1;}
+#endif
+ /* Initialize CPPI CPDMA */
+
+ result = netapi_init_cppi ();
+ if (result != 1)
+ {
+ printf (">netapi: Error initializing CPPI SubSystem error code : %d\n",result);
+ return -1;
+ }
+
+ /* CPPI and Queue Manager are initialized. */
+ printf (">netapi: Queue Manager and CPPI are initialized.\n");
+
+ /* create main pkt heap */
+ /* Initialize the Shared Heaps. */
+ Pktlib_sharedHeapInit();
+
+ /* Populate the heap interface table. */
+ netapi_pktlib_ifTable.data_malloc = netapiSharedMemoryMalloc;
+ netapi_pktlib_ifTable.data_free = netapiSharedMemoryFree;
+
+ /* Initialize the heap configuration. */
+ memset ((void *)&heapCfg, 0, sizeof(Pktlib_HeapCfg));
+ /* Populate the heap configuration */
+ heapCfg.name = "netapi";
+ heapCfg.memRegion = NETAPI_GLOBAL_REGION;
+ heapCfg.sharedHeap = 1;
+ heapCfg.useStarvationQueue = 0;
+ heapCfg.dataBufferSize = netapi_global.cfg.def_heap_buf_size;
+ heapCfg.numPkts = netapi_global.cfg.def_heap_n_descriptors;
+ heapCfg.numZeroBufferPackets= netapi_global.cfg.def_heap_n_zdescriptors;
+ heapCfg.heapInterfaceTable.data_malloc = netapi_pktlib_ifTable.data_malloc;
+ heapCfg.heapInterfaceTable.data_free = netapi_pktlib_ifTable.data_free;
+ heapCfg.dataBufferPktThreshold = 0;
+ heapCfg.zeroBufferPktThreshold = 0;
+
+ /* Create Shared Heap with specified configuration. */
+ sharedHeapHandle = Pktlib_createHeap(&heapCfg, &errCode);
+ //todo -> cleanup on failure
+ if (!sharedHeapHandle) { printf(">'netapi' heap create failed, Error Code: %d\n",errCode); return -1;}
+ handle->netcp_heap= sharedHeapHandle;
+
+
+ /* Update for Control */
+ heapCfg.name = "netapi_control";
+ heapCfg.sharedHeap = 1;
+ heapCfg.dataBufferSize = TUNE_NETAPI_CONFIG_MAX_CTL_RXTX_BUF_SIZE;
+ heapCfg.numPkts = TUNE_NETAPI_CONFIG_NUM_CTL_BUF;
+ heapCfg.numZeroBufferPackets= 0;
+
+ controlHeapHandle = Pktlib_createHeap(&heapCfg, &errCode);
+ //todo -> cleanup on failure
+ if (!controlHeapHandle) { printf(">netapi - 'netapi_control' heap create failed, Error Code: %d\n",errCode); return -1;}
+ handle->netcp_control_heap= controlHeapHandle;
+
+
+ /* now NWAL */
+ result = netapi_init_nwal(
+ NETAPI_GLOBAL_REGION,
+ &netapi_pktlib_ifTable,
+ &netapi_global.nwal_context,
+ &netapi_global.cfg);
+ if (result<0) {printf(">netapi init_nwal() failed\n"); return -1; }
+
+ /* start NWAL */
+ result = netapi_start_nwal(sharedHeapHandle,
+ controlHeapHandle,
+ &handle->nwal_local,
+ &netapi_global.cfg,
+ &netapi_global.nwal_context);
+ if (result<0) {printf(">netapi start_nwal() failed\n"); return -1; }
+ //** success **
+
+
+ return 0;
+
+}
+
+
+/*---------------
+ * get presets()
+ *---------------*/
+static void get_presets(PRESET_T * p_preset)
+{
+ /* read from kernel or overall config area */
+ /* for now hard code what kernel did */
+}
+
+
+/*************************************************************
+ ******************MISC INTERNAL******************************
+**************************************************************/
+/* poll the garbage queues of all registered heaps */
+void netapi_pollHeapGarbage(NETAPI_T h)
+{
+int i;
+ NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;
+ Pktlib_garbageCollection(n->netcp_heap);
+ //no need to do garbage collection on other internal heaps
+ for(i=0;i<TUNE_NETAPI_MAX_HEAPS;i++)
+ {
+ if (n->createdHeaps[i]) Pktlib_garbageCollection(n->createdHeaps[i]);
+ }
+}
+
+/* poll NETCP control queue for responses */
+void netapi_netcpPoll(NETAPI_T p)
+{
+ NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) p;
+ nwal_pollCtl( ((NETAPI_GLOBAL_T *) (n->global))->nwal_context.nwalInstHandle,NULL,NULL);
+}
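A hedged sketch of how an application event loop might call these two maintenance routines periodically; netapi_housekeeping is an illustrative name.

    /* sketch: periodic housekeeping, e.g. once per scheduler iteration or timer tick */
    static void netapi_housekeeping(NETAPI_T h)
    {
        netapi_netcpPoll(h);         /* reap pending NETCP control-path responses */
        netapi_pollHeapGarbage(h);   /* recycle packets parked on the heap garbage queues */
    }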
+
+/****************************************************************
+ *****************Cleanup Functions******************************
+*****************************************************************/
+
+//clean up function for linux user space
+void netapi_zapQ(int queueNum)
+{
+char * descPtr;
+int i;
+if (!queueNum) return;
+for (i=0;;i+=1 )
+ {
+ /* Pop descriptor from source queue */
+ if ((descPtr = (char *)Qmss_queuePop (queueNum)) == NULL)
+ {
+ break;
+ }
+ else {/*printf("netapi qzap in play\n");*/}
+ }
+ if(i) printf(">netapi: @recovery - %d descriptors cleaned from qn %d\n",i, queueNum);
+}
+
+//defensive: clean out stuff hanging around
+//
+// open a bunch of free queues and zap them
+#define NQUEUES2CLEAR 15
+static Qmss_QueueHnd tempQH[NQUEUES2CLEAR];
+static void netapi_cleanup_at_start(void)
+{
+int i;
+uint8_t isAllocated;
+
+for(i=0;i<NQUEUES2CLEAR;i++)
+{
+ tempQH[i] = Qmss_queueOpen(Qmss_QueueType_GENERAL_PURPOSE_QUEUE,
+ QMSS_PARAM_NOT_SPECIFIED, &isAllocated);
+ netapi_zapQ(tempQH[i]);
+}
+
+for(i=0;i<NQUEUES2CLEAR;i++)
+{
+ Qmss_queueClose(tempQH[i]);
+}
+
+}
+
+/********************************
+ * clean up a pktlib heap
+ ***********************************/
+int netapi_closeHeap(NETAPI_T h, Pktlib_HeapHandle p)
+{
+Qmss_QueueHnd q;
+Pktlib_garbageCollection(p);
+q = Pktlib_getZeroHeapQueue(p);
+netapi_zapQ(q);
+q= Pktlib_getInternalHeapQueue(p);
+netapi_zapQ(q);
+return 1;
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
index 51936b396fabb9f6843196997d8206e06f51e04d..f06293d63b27b444648d9957462b651da6700722 100755 (executable)
-/************************************************\r
-* FILE: netapi_init.c\r
-* Global, local initialization of NETAPI\r
- *\r
- * DESCRIPTION: Functions to initialize framework resources for running NETAPI\r
- *\r
- * REVISION HISTORY:\r
- *\r
- * Copyright (c) Texas Instruments Incorporated 2010-2011\r
- * \r
- * Redistribution and use in source and binary forms, with or without \r
- * modification, are permitted provided that the following conditions \r
- * are met:\r
- *\r
- * Redistributions of source code must retain the above copyright \r
- * notice, this list of conditions and the following disclaimer.\r
- *\r
- * Redistributions in binary form must reproduce the above copyright\r
- * notice, this list of conditions and the following disclaimer in the \r
- * documentation and/or other materials provided with the \r
- * distribution.\r
- *\r
- * Neither the name of Texas Instruments Incorporated nor the names of\r
- * its contributors may be used to endorse or promote products derived\r
- * from this software without specific prior written permission.\r
- *\r
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \r
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT \r
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT \r
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, \r
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT \r
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \r
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \r
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
- *\r
- ***********************************************/\r
-#include <stdint.h>\r
-#include <stdio.h>\r
-#include <string.h>\r
-#include <sys/types.h>\r
-#include <sys/stat.h>\r
-#include <fcntl.h>\r
-#include <sys/mman.h>\r
-#include <errno.h>\r
-#include <unistd.h>\r
-\r
-#include <ti/drv/nwal/nwal.h>\r
-#include "netapi.h"\r
-#include "netapi_vm.h"\r
-#include "netapi_loc.h"\r
-#include "ti/drv/nwal/nwal.h"\r
-\r
-/* CSL RL includes */\r
-#include <ti/csl/cslr_device.h>\r
-#include <ti/csl/cslr_qm_config.h>\r
-#include <ti/csl/cslr_qm_descriptor_region_config.h>\r
-#include <ti/csl/cslr_qm_queue_management.h>\r
-#include <ti/csl/cslr_qm_queue_status_config.h>\r
-#include <ti/csl/cslr_qm_intd.h>\r
-#include <ti/csl/cslr_pdsp.h>\r
-#include <ti/csl/csl_qm_queue.h>\r
-#include <ti/csl/cslr_cppidma_global_config.h>\r
-#include <ti/csl/cslr_cppidma_rx_channel_config.h>\r
-#include <ti/csl/cslr_cppidma_rx_flow_config.h>\r
-#include <ti/csl/cslr_cppidma_tx_channel_config.h>\r
-#include <ti/csl/cslr_cppidma_tx_scheduler_config.h>\r
-#include <ti/csl/csl_cppi.h>\r
-#include <ti/csl/csl_pscAux.h>\r
-#include <ti/csl/csl_semAux.h>\r
-#include <ti/csl/csl_cacheAux.h>\r
-#include <ti/csl/csl_xmcAux.h>\r
-#include <ti/csl/csl_cpsw_3gfAux.h>\r
-#include <ti/csl/csl_cpsw.h>\r
-#include <ti/csl/csl_cpsgmiiAux.h>\r
-#include <ti/drv/qmss/qmss_qm.h>\r
-//pull in device config for qmss, cppi\r
-#include <ti/drv/qmss/device/qmss_device.c>\r
-#include <ti/drv/cppi/device/cppi_device.c>\r
-\r
-/* TODO verify: */\r
-#define CACHE_LINESZ 64\r
-\r
-#define System_printf printf\r
-#define ALIGN(x) __attribute__((aligned (x)))\r
-\r
-/*****************************************************************************\r
- * Global Resources shared by all Cores\r
- *****************************************************************************/\r
-uint8_t *QMemGlobDescRam = 0;\r
-uint8_t *cppiMemPaSaLinkBuf = 0;\r
-uint8_t *cppiMemSaPaLinkBuf = 0;\r
-\r
-/*****************************************************************************\r
- * Local Resource allocated at each Core\r
- *****************************************************************************/\r
-/* Descriptors in global shared */\r
-uint8_t *QMemLocDescRam = NULL;\r
-uint8_t *cppiMemRxPktLinkBuf = NULL;\r
-uint8_t *cppiMemTxPktLinkBuf = NULL;\r
-uint8_t *cppiMemRxCtlLinkBuf = NULL;\r
-uint8_t *cppiMemTxCtlLinkBuf = NULL;\r
-\r
-\r
-//****************************************************\r
-// initialize CPSW (switch) [per SOC]\r
-//***************************************************\r
-int netapi_init_cpsw(void)\r
-{\r
- CSL_CPSW_3GF_ALE_PORTCONTROL alePortControlCfg;\r
-\r
- CSL_CPSW_3GF_clearAleTable();\r
-\r
- alePortControlCfg.dropUntaggedEnable = 0;\r
- alePortControlCfg.vidIngressCheckEnable = 0;\r
-\r
- alePortControlCfg.mcastLimit = 0;\r
- alePortControlCfg.bcastLimit = 0;\r
-\r
- /* Disable learning mode for Port 0 */\r
- alePortControlCfg.noLearnModeEnable = 1;\r
- alePortControlCfg.portState = ALE_PORTSTATE_FORWARD;\r
- CSL_CPSW_3GF_setAlePortControlReg (0, &alePortControlCfg);\r
-\r
- /* Enable learning mode for Port 1 */\r
- alePortControlCfg.noLearnModeEnable = 0;\r
- alePortControlCfg.portState = ALE_PORTSTATE_FORWARD;\r
- CSL_CPSW_3GF_setAlePortControlReg (1, &alePortControlCfg);\r
-\r
- /* Enable learning mode for Port 2 */\r
- alePortControlCfg.noLearnModeEnable = 0;\r
- alePortControlCfg.portState = ALE_PORTSTATE_FORWARD;\r
- CSL_CPSW_3GF_setAlePortControlReg (2, &alePortControlCfg);\r
-\r
- return 1;\r
-}\r
-\r
-//****************************************************\r
-// initialize QM (per SOC)\r
-//***************************************************\r
-int netapi_init_qm(int max_descriptors)\r
-{\r
- Qmss_InitCfg qmssInitConfig;\r
- int32_t result;\r
- Qmss_GlobalConfigParams nwalTest_qmssGblCfgParams;\r
-\r
- memset (&qmssInitConfig, 0, sizeof (Qmss_InitCfg));\r
-\r
- /* Use Internal Linking RAM for optimal performance */\r
- qmssInitConfig.linkingRAM0Base = 0;\r
- qmssInitConfig.linkingRAM0Size = 0;\r
- qmssInitConfig.linkingRAM1Base = 0;\r
- qmssInitConfig.maxDescNum = max_descriptors;\r
- qmssInitConfig.qmssHwStatus =QMSS_HW_INIT_COMPLETE; //bypass some of the hw init\r
- nwalTest_qmssGblCfgParams = qmssGblCfgParams[0];\r
-\r
- nwalTest_qmssGblCfgParams.qmConfigReg = (void *)((uint8_t *)netapi_VM_qmssCfgVaddr +\r
- (CSL_QM_SS_CFG_CONFIG_STARVATION_COUNTER_REGS - CSL_QM_SS_CFG_QUE_PEEK_REGS));\r
- nwalTest_qmssGblCfgParams.qmDescReg = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- ((uint32_t)CSL_QM_SS_CFG_DESCRIPTION_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);\r
- nwalTest_qmssGblCfgParams.qmQueMgmtReg = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- ((uint32_t)CSL_QM_SS_CFG_QM_QUEUE_DEQUEUE_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);\r
- nwalTest_qmssGblCfgParams.qmQueMgmtProxyReg = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- ((uint32_t)CSL_QM_SS_CFG_PROXY_QUEUE_DEQUEUE_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);\r
- nwalTest_qmssGblCfgParams.qmQueStatReg = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- ((uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);\r
- nwalTest_qmssGblCfgParams.qmQueIntdReg = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- ((uint32_t)CSL_QM_SS_CFG_INTD_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);\r
- nwalTest_qmssGblCfgParams.qmPdspCmdReg[0] = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- ((uint32_t)CSL_QM_SS_CFG_SCRACH_RAM1_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);\r
- nwalTest_qmssGblCfgParams.qmPdspCmdReg[1] = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- ((uint32_t)CSL_QM_SS_CFG_SCRACH_RAM2_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);\r
- nwalTest_qmssGblCfgParams.qmPdspCtrlReg[0] = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- ((uint32_t)CSL_QM_SS_CFG_ADSP1_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);\r
- nwalTest_qmssGblCfgParams.qmPdspCtrlReg[1] = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- ((uint32_t)CSL_QM_SS_CFG_ADSP2_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);\r
- nwalTest_qmssGblCfgParams.qmPdspIRamReg[0] = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- ((uint32_t)CSL_QM_SS_CFG_APDSP1_RAM_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);\r
- nwalTest_qmssGblCfgParams.qmPdspIRamReg[1] = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- ((uint32_t)CSL_QM_SS_CFG_APDSP2_RAM_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);\r
- nwalTest_qmssGblCfgParams.qmStatusRAM = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- ((uint32_t)CSL_QM_SS_CFG_QM_STATUS_RAM_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);\r
- nwalTest_qmssGblCfgParams.qmLinkingRAMReg = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- ((uint32_t)CSL_QM_SS_CFG_LINKING_RAM_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);\r
- nwalTest_qmssGblCfgParams.qmMcDMAReg = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- ((uint32_t)CSL_QM_SS_CFG_MCDMA_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);\r
- nwalTest_qmssGblCfgParams.qmTimer16Reg[0] = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- ((uint32_t)CSL_QM_SS_CFG_TIMER1_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);\r
- nwalTest_qmssGblCfgParams.qmTimer16Reg[1] = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- ((uint32_t)CSL_QM_SS_CFG_TIMER2_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);\r
- nwalTest_qmssGblCfgParams.qmQueMgmtDataReg = (void *)((uint32_t)netapi_VM_qmssDataVaddr);\r
- nwalTest_qmssGblCfgParams.qmQueMgmtProxyDataReg = \r
- (void *)((uint32_t)netapi_VM_qmssDataVaddr + ((uint32_t)(0x44040000) - (uint32_t)(0x44020000)));\r
-\r
- result = Qmss_init (&qmssInitConfig, &nwalTest_qmssGblCfgParams);\r
- if (result != QMSS_SOK) {\r
- System_printf (">function init_qm: qmss_Init failed with error code %d\n", result);\r
- return (nwal_FALSE);\r
- }\r
-\r
- return 1;\r
-}\r
-\r
-//****************************************************\r
-// Set up QM memory region (per SOC)\r
-//***************************************************\r
-int netapi_qm_setup_mem_region(\r
- uint32_t numDesc,\r
- uint32_t descSize,\r
- uint32_t* pDescMemBase,\r
- Qmss_MemRegion memRegion)\r
-{\r
- Qmss_MemRegInfo memInfo;\r
- Int32 result;\r
- Int n;\r
- static int netapi_qm_region_index=0;\r
-\r
- memset(&memInfo,0,sizeof(Qmss_MemRegInfo));\r
- memInfo.descBase = pDescMemBase;\r
- memInfo.descSize = descSize;\r
- memInfo.descNum = numDesc;\r
- memInfo.manageDescFlag = Qmss_ManageDesc_MANAGE_DESCRIPTOR;\r
- memInfo.memRegion = memRegion;\r
-\r
- if(memRegion == NETAPI_GLOBAL_REGION)\r
- {\r
- memInfo.startIndex = TUNE_NETAPI_QM_START_INDEX; //was 0\r
- netapi_qm_region_index += numDesc;\r
- }else if(memRegion ==NETAPI_LOCAL_REGION)\r
- {\r
- /* 2nd region for descriptors (perhaps private?) */\r
- memInfo.startIndex = netapi_qm_region_index;\r
- }\r
- else\r
- {\r
- return -1 ;\r
- }\r
-\r
- memset (pDescMemBase, 0, (descSize * numDesc));\r
-\r
- result = Qmss_insertMemoryRegion (&memInfo);\r
- if (result < QMSS_SOK) \r
- {\r
- printf (">function setup_qm_region: Qmss_insertMemoryRegion returned error code %d\n", result);\r
- return (-1);\r
- }\r
-\r
- return 1;\r
-\r
-}\r
-\r
-//****************************************************\r
-// Start QM (per thread)\r
-//***************************************************\r
-int netapi_start_qm(void)\r
-{\r
- int32_t result;\r
- result = Qmss_start();\r
- if (result != QMSS_SOK)\r
- {\r
- System_printf (">start_qm: Qmss_start failed with error code %d\n", result);\r
- return (-1);\r
- }\r
- return 1;\r
-}\r
-\r
-//*************************************************\r
-//initilaize CPPI (once per soc)\r
-//*************************************************\r
-int netapi_init_cppi(void)\r
-{\r
- int32_t result, i;\r
- Cppi_GlobalConfigParams nwalTest_cppiGblCfgParams[CPPI_MAX_CPDMA];\r
-\r
- for (i=0; i<CPPI_MAX_CPDMA; i++)\r
- nwalTest_cppiGblCfgParams[i] = cppiGblCfgParams[i];\r
-\r
- /* SRIO CPDMA regs */\r
- nwalTest_cppiGblCfgParams[Cppi_CpDma_SRIO_CPDMA].gblCfgRegs =\r
- (void *)((uint32_t)netapi_VM_srioCfgVaddr +\r
- (((uint32_t)CSL_SRIO_CONFIG_CPPI_DMA_GLOBAL_CFG_REGS) - (uint32_t)CSL_SRIO_CONFIG_REGS));\r
- nwalTest_cppiGblCfgParams[Cppi_CpDma_SRIO_CPDMA].txChRegs =\r
- (void *)((uint32_t)netapi_VM_srioCfgVaddr +\r
- (((uint32_t)CSL_SRIO_CONFIG_CPPI_DMA_TX_CFG_REGS) - (uint32_t)CSL_SRIO_CONFIG_REGS));\r
-\r
- nwalTest_cppiGblCfgParams[Cppi_CpDma_SRIO_CPDMA].rxChRegs =\r
- (void *)((uint32_t)netapi_VM_srioCfgVaddr +\r
- (((uint32_t)CSL_SRIO_CONFIG_CPPI_DMA_RX_CFG_REGS) - (uint32_t)CSL_SRIO_CONFIG_REGS));\r
- nwalTest_cppiGblCfgParams[Cppi_CpDma_SRIO_CPDMA].txSchedRegs =\r
- (void *)((uint32_t)netapi_VM_srioCfgVaddr +\r
- (((uint32_t)CSL_SRIO_CONFIG_CPPI_DMA_TX_SCHEDULER_CFG_REGS) - (uint32_t)CSL_SRIO_CONFIG_REGS));\r
- nwalTest_cppiGblCfgParams[Cppi_CpDma_SRIO_CPDMA].rxFlowRegs =\r
- (void *)((uint32_t)netapi_VM_srioCfgVaddr +\r
- (((uint32_t)CSL_SRIO_CONFIG_CPPI_DMA_RX_FLOW_CFG_REGS) - (uint32_t)CSL_SRIO_CONFIG_REGS));\r
-\r
- /* PASS CPDMA regs */\r
- nwalTest_cppiGblCfgParams[Cppi_CpDma_PASS_CPDMA].gblCfgRegs =\r
- (void *)((uint32_t)netapi_VM_passCfgVaddr +\r
- (((uint32_t)CSL_PA_SS_CFG_CPPI_DMA_GLOBAL_CFG_REGS) - (uint32_t)CSL_PA_SS_CFG_REGS));\r
- nwalTest_cppiGblCfgParams[Cppi_CpDma_PASS_CPDMA].txChRegs =\r
- (void *)((uint32_t)netapi_VM_passCfgVaddr +\r
- (((uint32_t)CSL_PA_SS_CFG_CPPI_DMA_TX_CFG_REGS) - (uint32_t)CSL_PA_SS_CFG_REGS));\r
- nwalTest_cppiGblCfgParams[Cppi_CpDma_PASS_CPDMA].rxChRegs =\r
- (void *)((uint32_t)netapi_VM_passCfgVaddr +\r
- (((uint32_t)CSL_PA_SS_CFG_CPPI_DMA_RX_CFG_REGS) - (uint32_t)CSL_PA_SS_CFG_REGS));\r
- nwalTest_cppiGblCfgParams[Cppi_CpDma_PASS_CPDMA].txSchedRegs =\r
- (void *)((uint32_t)netapi_VM_passCfgVaddr +\r
- (((uint32_t)CSL_PA_SS_CFG_CPPI_DMA_TX_SCHEDULER_CFG_REGS) - (uint32_t)CSL_PA_SS_CFG_REGS));\r
- nwalTest_cppiGblCfgParams[Cppi_CpDma_PASS_CPDMA].rxFlowRegs =\r
- (void *)((uint32_t)netapi_VM_passCfgVaddr +\r
- (((uint32_t)CSL_PA_SS_CFG_CPPI_DMA_RX_FLOW_CFG_REGS) - (uint32_t)CSL_PA_SS_CFG_REGS));\r
- /* QMSS CPDMA regs */\r
- nwalTest_cppiGblCfgParams[Cppi_CpDma_QMSS_CPDMA].gblCfgRegs =\r
- (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- (((uint32_t)CSL_QM_SS_CFG_CPPI_DMA_GLOBAL_CFG_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS));\r
- nwalTest_cppiGblCfgParams[Cppi_CpDma_QMSS_CPDMA].txChRegs =\r
- (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- (((uint32_t)CSL_QM_SS_CFG_CPPI_DMA_TX_CFG_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS));\r
- nwalTest_cppiGblCfgParams[Cppi_CpDma_QMSS_CPDMA].rxChRegs =\r
- (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- (((uint32_t)CSL_QM_SS_CFG_CPPI_DMA_RX_CFG_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS));\r
- nwalTest_cppiGblCfgParams[Cppi_CpDma_QMSS_CPDMA].txSchedRegs =\r
- (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- (((uint32_t)CSL_QM_SS_CFG_CPPI_DMA_TX_SCHEDULER_CFG_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS));\r
- nwalTest_cppiGblCfgParams[Cppi_CpDma_QMSS_CPDMA].rxFlowRegs =\r
- (void *)((uint32_t)netapi_VM_qmssCfgVaddr +\r
- (((uint32_t)CSL_QM_SS_CFG_CPPI_DMA_RX_FLOW_CFG_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS));\r
-\r
- result = Cppi_init (nwalTest_cppiGblCfgParams);\r
- if (result != CPPI_SOK) \r
- {\r
- printf (">function cppi_init: Cppi_init failed with error code %d\n", result);\r
- return (-1);\r
- }\r
- return 1;\r
-}\r
-\r
-//*************************************************\r
-//initialize NWAL (one per soc) \r
-//*************************************************\r
-/*** NWAL Memory Buffer Configuration ***/\r
-#define NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_HANDLE 3400\r
-uint8_t nwalInstMem[NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_HANDLE]ALIGN(CACHE_LINESZ);\r
-\r
-#define NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_MAC 128\r
-#define NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_IPSEC_HANDLE_PER_CHAN 256\r
-#define NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_IP 128\r
-#define NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_PORT 128\r
-#define NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_L2L3_HDR 128\r
-#define NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_LOC_CONTEXT 384\r
-#define NWAL_CHAN_HANDLE_SIZE ((NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_MAC * TUNE_NETAPI_MAX_NUM_MAC) + \\r
- (NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_IPSEC_HANDLE_PER_CHAN * TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS*2) + \\r
- (NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_IP * TUNE_NETAPI_MAX_NUM_IP) + \\r
- (NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_PORT * TUNE_NETAPI_MAX_NUM_PORTS)+ \\r
- (NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_LOC_CONTEXT * TUNE_NETAPI_NUM_CORES) + \\r
- (NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_L2L3_HDR * TUNE_NETAPI_MAX_NUM_L2_L3_HDRS))\r
-\r
-uint8_t nwalHandleMem[NWAL_CHAN_HANDLE_SIZE]ALIGN(CACHE_LINESZ);\r
-\r
-/* todo: Check if below size information can be made available from pa interface file */\r
-#define NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF0 128\r
-/* PA instance */\r
-/* Memory used for the PA Instance. Needs to be assigned global uncached memory for chip */\r
-uint8_t paBuf0[NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF0]ALIGN(CACHE_LINESZ);\r
-\r
-/* Memory used for PA handles */\r
-#define NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF1 128\r
-uint8_t paBuf1[NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF1]ALIGN(CACHE_LINESZ);\r
-\r
-#define NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF2 768 \r
-uint8_t paBuf2[NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF2]ALIGN(CACHE_LINESZ);\r
-\r
-/* Memory used for SA LLD global Handle */\r
-#define NETAPI_NWAL_CONFIG_BUFSIZE_SA_LLD_HANDLE 384\r
-uint8_t salldHandle[NETAPI_NWAL_CONFIG_BUFSIZE_SA_LLD_HANDLE]ALIGN(CACHE_LINESZ);\r
-\r
-#if 0 //need to alloc this since we need phy addr also \r
-/* Memory used for SA contet Handle */\r
-#define NETAPI_NWAL_CONFIG_BUFSIZE_SA_CONTEXT_PER_CHAN 384\r
-uint8_t saContext[NETAPI_NWAL_CONFIG_BUFSIZE_SA_CONTEXT_PER_CHAN * TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS]ALIGN(CACHE_LINESZ);\r
-#endif\r
-\r
-/* Memory used by SA LLD per Channel */\r
-#define NETAPI_NWAL_CONFIG_BUFSIZE_SA_LLD_HANDLE_PER_CHAN 512\r
-uint8_t salldChanHandle[NETAPI_NWAL_CONFIG_BUFSIZE_SA_LLD_HANDLE_PER_CHAN * TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS*2]ALIGN(CACHE_LINESZ);\r
-\r
-\r
-/*******************************************\r
- * Initialize the nwal subsystem for NETAPI use\r
- ***********************************************/\r
-int netapi_init_nwal(\r
- int region2use, \r
- Pktlib_HeapIfTable * p_table,\r
- NETAPI_NWAL_GLOBAL_CONTEXT_T * p_nwal_context, \r
- NETAPI_CFG_T*p_cfg )\r
-{\r
- nwalSizeInfo_t nwalSizeInfo;\r
- nwalMemBuf_t nwalMemBuf[nwal_N_BUFS];\r
- nwal_RetValue nwalRetVal;\r
- nwalGlobCfg_t nwalGlobCfg;\r
- uint8_t count;\r
- int sizes[nwal_N_BUFS];\r
- int aligns[nwal_N_BUFS];\r
- void* bases[nwal_N_BUFS];\r
-\r
- memset(p_nwal_context,0,sizeof( NETAPI_NWAL_GLOBAL_CONTEXT_T) );\r
- memset(&nwalGlobCfg,0,sizeof(nwalGlobCfg_t ) );\r
-\r
-\r
- /* Initialize Buffer Pool for NetCP PA to SA packets */\r
- nwalGlobCfg.pa2SaBufPool.numBufPools = 1;\r
- nwalGlobCfg.pa2SaBufPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;\r
- nwalGlobCfg.pa2SaBufPool.bufPool[0].bufSize = p_cfg->def_heap_buf_size;\r
- nwalGlobCfg.pa2SaBufPool.bufPool[0].heapHandle = p_nwal_context->pa2sa_heap=\r
- Pktlib_createHeap("nwal PA2SA",\r
- region2use,\r
- 0,\r
- p_cfg->def_heap_buf_size,\r
- TUNE_NETAPI_CONFIG_MAX_PA_TO_SA_DESC,\r
- 0,\r
- p_table);\r
- if(nwalGlobCfg.pa2SaBufPool.bufPool[0].heapHandle == NULL)\r
- {\r
- printf (">Pktlib_createHeap:Heap Creation Failed for PA to SA Buffer Pool \n");\r
- netapi_err_teardown(); \r
- return -1;\r
- }\r
-\r
- /* Initialize Buffer Pool for NetCP SA to PA packets */\r
- nwalGlobCfg.sa2PaBufPool.numBufPools = 1;\r
- nwalGlobCfg.sa2PaBufPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;\r
- nwalGlobCfg.sa2PaBufPool.bufPool[0].bufSize = p_cfg->def_heap_buf_size;\r
-\r
- nwalGlobCfg.sa2PaBufPool.bufPool[0].heapHandle = p_nwal_context->sa2pa_heap=\r
- Pktlib_createHeap("nwal SA2PA",\r
- region2use,\r
- 0,\r
- p_cfg->def_heap_buf_size,\r
- TUNE_NETAPI_CONFIG_MAX_SA_TO_PA_DESC,\r
- 0,\r
- p_table);\r
- if(nwalGlobCfg.sa2PaBufPool.bufPool[0].heapHandle == NULL)\r
- {\r
- printf (">Pktlib_createHeap:Heap Creation Failed for SA to PA Buffer Pool \n");\r
- netapi_err_teardown();\r
- return -1;\r
- }\r
-\r
- nwalGlobCfg.hopLimit = 5;/* Default TTL / Hop Limit */\r
- nwalGlobCfg.lpbackPass = TUNE_NETAPI_NWAL_ENABLE_PASS_LOOPBACK;\r
- nwalGlobCfg.paPowerOn = nwal_TRUE;\r
- nwalGlobCfg.saPowerOn = nwal_TRUE;\r
- nwalGlobCfg.paFwActive = nwal_TRUE;\r
- nwalGlobCfg.saFwActive = nwal_FALSE;\r
-\r
- /* Pick Default Physical Address */\r
- nwalGlobCfg.paVirtBaseAddr = (uint32_t) netapi_VM_passCfgVaddr;\r
- nwalGlobCfg.saVirtBaseAddr = (uint32_t) netapi_VM_passCfgVaddr +\r
- ((uint32_t)CSL_PA_SS_CFG_CP_ACE_CFG_REGS - (uint32_t)CSL_PA_SS_CFG_REGS) ;\r
- nwalGlobCfg.rxDefPktQ = QMSS_PARAM_NOT_SPECIFIED;\r
-\r
- /* Get the Buffer Requirement from NWAL */\r
- memset(&nwalMemBuf,0,sizeof(nwalMemBuf));\r
- memset(&nwalSizeInfo,0,sizeof(nwalSizeInfo));\r
- nwalSizeInfo.nMaxMacAddress = TUNE_NETAPI_MAX_NUM_MAC;\r
- nwalSizeInfo.nMaxIpAddress = TUNE_NETAPI_MAX_NUM_IP;\r
- nwalSizeInfo.nMaxL4Ports = TUNE_NETAPI_MAX_NUM_PORTS;\r
- nwalSizeInfo.nMaxIpSecChannels = TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS;//we allocate 2 per channel\r
- nwalSizeInfo.nMaxDmSecChannels = TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS;//we allocate 2 per channel\r
- nwalSizeInfo.nMaxL2L3Hdr = TUNE_NETAPI_MAX_NUM_L2_L3_HDRS;\r
- nwalSizeInfo.nProc = TUNE_NETAPI_NUM_CORES;\r
- for(count=0;count < nwal_N_BUFS;count++)\r
- {\r
- nwalMemBuf[count].cacheLineSize = CACHE_LINESZ;\r
- }\r
- nwalRetVal = nwal_getBufferReq(&nwalSizeInfo,\r
- sizes,\r
- aligns);\r
- if(nwalRetVal != nwal_OK)\r
- {\r
- printf (">netapi: init_nwal - nwal_getBufferReq Failed %d\n", nwalRetVal);\r
- return nwal_FALSE;\r
- }\r
-\r
-/* Check for memory size requirement and update the base */\r
- count = 0;\r
- bases[nwal_BUF_INDEX_INST] = (uint32_t *)Osal_nwalLocToGlobAddr((uint32_t)nwalInstMem);\r
- if(NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_HANDLE < sizes[nwal_BUF_INDEX_INST])\r
- {\r
- /* Resize Memory */\r
- while(1);\r
- }\r
- count++;\r
-\r
- bases[nwal_BUF_INDEX_INT_HANDLES] = (uint32_t *)Osal_nwalLocToGlobAddr((uint32_t)nwalHandleMem);\r
- if(NWAL_CHAN_HANDLE_SIZE < sizes[nwal_BUF_INDEX_INT_HANDLES])\r
- {\r
- /* Resize Memory */\r
- while(1);\r
- }\r
- count++;\r
- bases[nwal_BUF_INDEX_PA_LLD_BUF0] = (uint32_t *)Osal_nwalLocToGlobAddr((uint32_t)paBuf0);\r
- if((NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF0) < sizes[nwal_BUF_INDEX_PA_LLD_BUF0])\r
- {\r
- /* Resize Memory */\r
- while(1);\r
- }\r
- count++;\r
-\r
- bases[nwal_BUF_INDEX_PA_LLD_BUF1] = (uint32_t *)Osal_nwalLocToGlobAddr((uint32_t)paBuf1);\r
- if((NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF1) < sizes[nwal_BUF_INDEX_PA_LLD_BUF1])\r
- {\r
- /* Resize Memory */\r
- while(1);\r
- }\r
- count++;\r
-\r
- bases[nwal_BUF_INDEX_PA_LLD_BUF2] = (uint32_t *)Osal_nwalLocToGlobAddr((uint32_t)paBuf2);\r
- if((NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF2) < sizes[nwal_BUF_INDEX_PA_LLD_BUF2])\r
- {\r
- /* Resize Memory */\r
- while(1);\r
- }\r
- count++;\r
-#ifdef NETAPI_ENABLE_SECURITY\r
- bases[nwal_BUF_INDEX_SA_LLD_HANDLE] = (uint32_t *)Osal_nwalLocToGlobAddr((uint32_t)salldHandle);\r
- if((NETAPI_NWAL_CONFIG_BUFSIZE_SA_LLD_HANDLE) < sizes[nwal_BUF_INDEX_SA_LLD_HANDLE])\r
- {\r
- /* Resize Memory */\r
- while(1);\r
- }\r
- count++;\r
-\r
- bases[nwal_BUF_INDEX_SA_CONTEXT] = (uint32_t *)Osal_nwalLocToGlobAddr((uint32_t)netapi_VM_SaContextVaddr);\r
- count++;\r
-\r
- bases[nwal_BUF_INDEX_SA_LLD_CHAN_HANDLE] = (uint32_t *)Osal_nwalLocToGlobAddr((uint32_t)salldChanHandle);\r
- if((NETAPI_NWAL_CONFIG_BUFSIZE_SA_LLD_HANDLE_PER_CHAN * TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS*2) <\r
- sizes[nwal_BUF_INDEX_SA_LLD_CHAN_HANDLE])\r
- {\r
- /* Resize Memory */\r
- while(1);\r
- }\r
- count++;\r
-#else\r
- bases[nwal_BUF_INDEX_SA_LLD_HANDLE] = 0;\r
- bases[nwal_BUF_INDEX_SA_CONTEXT] = 0;\r
- bases[nwal_BUF_INDEX_SA_LLD_CHAN_HANDLE] = 0;\r
- count = count+3;\r
-#endif\r
- if(count != nwal_N_BUFS)\r
- {\r
- while(1);\r
- }\r
-\r
- /* Initialize NWAL module */\r
- nwalRetVal = nwal_create(&nwalGlobCfg,\r
- &nwalSizeInfo,\r
- sizes,\r
- bases,\r
- &p_nwal_context->nwalInstHandle);\r
- if(nwalRetVal != nwal_OK)\r
- {\r
- printf (">netapi: init_nwal- nwal_create Failed %d\n",nwalRetVal);\r
- while(1);\r
- }\r
-\r
- printf(">netapi: init_nwal - Global and Local Network initialization Successful \n");\r
- return 1;\r
-}\r
-\r
-//*************************************************\r
-//* Local (per thread/core) nwal initialization0\r
-//**************************************************\r
-int netapi_start_nwal(Pktlib_HeapHandle pkt_heap,\r
- Pktlib_HeapHandle cmd_heap,\r
- NETAPI_NWAL_LOCAL_CONTEXT_T *p,\r
- NETAPI_NWAL_GLOBAL_CONTEXT_T * p_nwal_glob_context )\r
-{\r
- nwalLocCfg_t nwalLocCfg;\r
- int count;\r
- nwal_RetValue nwalRetVal;\r
-\r
- memset(&nwalLocCfg,0,sizeof(nwalLocCfg));\r
-\r
- /* Common Initialization for all cores */\r
- while(count < TUNE_NETAPI_MAX_NUM_TRANS)\r
- {\r
- p_nwal_glob_context->transInfos[count].transId = count;\r
- count++;\r
- }\r
-\r
- /* Call back registration for the core */\r
- nwalLocCfg.pRxPktCallBack = netapi_NWALRxPktCallback;\r
- nwalLocCfg.pCmdCallBack = netapi_NWALCmdCallBack;\r
- nwalLocCfg.pPaStatsCallBack = netapi_NWALCmdPaStatsReply;\r
- nwalLocCfg.pRxDmCallBack= netapi_NWALSBPktCallback; //sideband mode callback\r
-\r
- /* Initialize Buffer Pool for Control packets from NetCP to Host */\r
- nwalLocCfg.rxCtlPool.numBufPools = 1;\r
- nwalLocCfg.rxCtlPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;\r
- nwalLocCfg.rxCtlPool.bufPool[0].bufSize = TUNE_NETAPI_CONFIG_MAX_CTL_RXTX_BUF_SIZE;\r
- nwalLocCfg.rxCtlPool.bufPool[0].heapHandle = cmd_heap;\r
-\r
- /* Initialize Buffer Pool for Control packets from Host to NetCP */\r
- nwalLocCfg.txCtlPool.numBufPools = 1;\r
- nwalLocCfg.txCtlPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;\r
- nwalLocCfg.txCtlPool.bufPool[0].bufSize = TUNE_NETAPI_CONFIG_MAX_CTL_RXTX_BUF_SIZE;\r
- nwalLocCfg.txCtlPool.bufPool[0].heapHandle = cmd_heap;\r
-\r
-/* Initialize Buffer Pool for Packets from NetCP to Host */\r
- nwalLocCfg.rxPktPool.numBufPools = 1;\r
- nwalLocCfg.rxPktPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;\r
- nwalLocCfg.rxPktPool.bufPool[0].bufSize = TUNE_NETAPI_DEFAULT_BUFFER_SIZE;\r
- nwalLocCfg.rxPktPool.bufPool[0].heapHandle = pkt_heap;\r
-\r
-/* Initialize Buffer Pool for Packets from Host to NetCP */\r
- nwalLocCfg.txPktPool.numBufPools = 1;\r
- nwalLocCfg.txPktPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;\r
- nwalLocCfg.txPktPool.bufPool[0].bufSize = TUNE_NETAPI_DEFAULT_BUFFER_SIZE;\r
- nwalLocCfg.txPktPool.bufPool[0].heapHandle = pkt_heap;\r
-\r
- memcpy(&p->nwalLocCfg,&nwalLocCfg,sizeof(nwalLocCfg_t));\r
- while(1)\r
- {\r
- nwalRetVal = nwal_start(p_nwal_glob_context->nwalInstHandle,&nwalLocCfg);\r
- if(nwalRetVal == nwal_ERR_INVALID_STATE)\r
- {\r
- continue;\r
- }\r
- break;\r
- }\r
-\r
- if(nwalRetVal != nwal_OK)\r
- {\r
- printf (">nwal_start:Failed ->err %d !!!\n", nwalRetVal);\r
- return -1;\r
- }\r
- p->state = NETAPI_NW_CXT_LOC_ACTIVE;\r
- return 1;\r
-\r
-\r
-}\r
-//***************************************************\r
-// intialize timer\r
-//***************************************************\r
-int netapi_init_timer(void)\r
-{\r
- return t64_start();\r
-}\r
-\r
-\r
+/************************************************
+* FILE: netapi_init.c
+* Global, local initialization of NETAPI
+ *
+ * DESCRIPTION: Functions to initialize framework resources for running NETAPI
+ *
+ * REVISION HISTORY:
+ *
+ * Copyright (c) Texas Instruments Incorporated 2010-2011
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ***********************************************/
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <ti/drv/nwal/nwal.h>
+#include "netapi.h"
+#include "netapi_vm.h"
+#include "netapi_loc.h"
+#include "ti/drv/nwal/nwal.h"
+
+/* CSL RL includes */
+#include <ti/csl/cslr_device.h>
+#include <ti/csl/cslr_qm_config.h>
+#include <ti/csl/cslr_qm_descriptor_region_config.h>
+#include <ti/csl/cslr_qm_queue_management.h>
+#include <ti/csl/cslr_qm_queue_status_config.h>
+#include <ti/csl/cslr_qm_intd.h>
+#include <ti/csl/cslr_pdsp.h>
+#include <ti/csl/csl_qm_queue.h>
+#include <ti/csl/cslr_cppidma_global_config.h>
+#include <ti/csl/cslr_cppidma_rx_channel_config.h>
+#include <ti/csl/cslr_cppidma_rx_flow_config.h>
+#include <ti/csl/cslr_cppidma_tx_channel_config.h>
+#include <ti/csl/cslr_cppidma_tx_scheduler_config.h>
+#include <ti/csl/csl_cppi.h>
+#include <ti/csl/csl_pscAux.h>
+#include <ti/csl/csl_semAux.h>
+#include <ti/csl/csl_cacheAux.h>
+#include <ti/csl/csl_xmcAux.h>
+#include <ti/csl/csl_cpsw_3gfAux.h>
+#include <ti/csl/csl_cpsw.h>
+#include <ti/csl/csl_cpsgmiiAux.h>
+#include <ti/drv/qmss/qmss_qm.h>
+//pull in device config for qmss, cppi
+#include <ti/drv/qmss/device/qmss_device.c>
+#include <ti/drv/cppi/device/cppi_device.c>
+
+/* TODO verify: */
+#define CACHE_LINESZ 64
+
+#define System_printf printf
+#define ALIGN(x) __attribute__((aligned (x)))
+
+/*****************************************************************************
+ * Global Resources shared by all Cores
+ *****************************************************************************/
+uint8_t *QMemGlobDescRam = 0;
+uint8_t *cppiMemPaSaLinkBuf = 0;
+uint8_t *cppiMemSaPaLinkBuf = 0;
+
+/*****************************************************************************
+ * Local Resource allocated at each Core
+ *****************************************************************************/
+/* Descriptors in global shared */
+uint8_t *QMemLocDescRam = NULL;
+uint8_t *cppiMemRxPktLinkBuf = NULL;
+uint8_t *cppiMemTxPktLinkBuf = NULL;
+uint8_t *cppiMemRxCtlLinkBuf = NULL;
+uint8_t *cppiMemTxCtlLinkBuf = NULL;
+
+
+//****************************************************
+// initialize CPSW (switch) [per SOC]
+//***************************************************
+int netapi_init_cpsw(void)
+{
+ CSL_CPSW_3GF_ALE_PORTCONTROL alePortControlCfg;
+
+ CSL_CPSW_3GF_clearAleTable();
+
+ alePortControlCfg.dropUntaggedEnable = 0;
+ alePortControlCfg.vidIngressCheckEnable = 0;
+
+ alePortControlCfg.mcastLimit = 0;
+ alePortControlCfg.bcastLimit = 0;
+
+ /* Disable learning mode for Port 0 */
+ alePortControlCfg.noLearnModeEnable = 1;
+ alePortControlCfg.portState = ALE_PORTSTATE_FORWARD;
+ CSL_CPSW_3GF_setAlePortControlReg (0, &alePortControlCfg);
+
+ /* Enable learning mode for Port 1 */
+ alePortControlCfg.noLearnModeEnable = 0;
+ alePortControlCfg.portState = ALE_PORTSTATE_FORWARD;
+ CSL_CPSW_3GF_setAlePortControlReg (1, &alePortControlCfg);
+
+ /* Enable learning mode for Port 2 */
+ alePortControlCfg.noLearnModeEnable = 0;
+ alePortControlCfg.portState = ALE_PORTSTATE_FORWARD;
+ CSL_CPSW_3GF_setAlePortControlReg (2, &alePortControlCfg);
+
+ return 1;
+}
+
+//****************************************************
+// initialize QM (per SOC)
+//***************************************************
+int netapi_init_qm(int max_descriptors)
+{
+ Qmss_InitCfg qmssInitConfig;
+ int32_t result;
+ Qmss_GlobalConfigParams nwalTest_qmssGblCfgParams;
+
+ memset (&qmssInitConfig, 0, sizeof (Qmss_InitCfg));
+
+ /* Use Internal Linking RAM for optimal performance */
+ qmssInitConfig.linkingRAM0Base = 0;
+ qmssInitConfig.linkingRAM0Size = 0;
+ qmssInitConfig.linkingRAM1Base = 0;
+ qmssInitConfig.maxDescNum = max_descriptors;
+ qmssInitConfig.qmssHwStatus =QMSS_HW_INIT_COMPLETE; //bypass some of the hw init
+ nwalTest_qmssGblCfgParams = qmssGblCfgParams[0];
+
+ nwalTest_qmssGblCfgParams.qmConfigReg = (void *)((uint8_t *)netapi_VM_qmssCfgVaddr +
+ (CSL_QM_SS_CFG_CONFIG_STARVATION_COUNTER_REGS - CSL_QM_SS_CFG_QUE_PEEK_REGS));
+ nwalTest_qmssGblCfgParams.qmDescReg = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ ((uint32_t)CSL_QM_SS_CFG_DESCRIPTION_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);
+ nwalTest_qmssGblCfgParams.qmQueMgmtReg = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ ((uint32_t)CSL_QM_SS_CFG_QM_QUEUE_DEQUEUE_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);
+ nwalTest_qmssGblCfgParams.qmQueMgmtProxyReg = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ ((uint32_t)CSL_QM_SS_CFG_PROXY_QUEUE_DEQUEUE_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);
+ nwalTest_qmssGblCfgParams.qmQueStatReg = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ ((uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);
+ nwalTest_qmssGblCfgParams.qmQueIntdReg = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ ((uint32_t)CSL_QM_SS_CFG_INTD_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);
+ nwalTest_qmssGblCfgParams.qmPdspCmdReg[0] = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ ((uint32_t)CSL_QM_SS_CFG_SCRACH_RAM1_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);
+ nwalTest_qmssGblCfgParams.qmPdspCmdReg[1] = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ ((uint32_t)CSL_QM_SS_CFG_SCRACH_RAM2_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);
+ nwalTest_qmssGblCfgParams.qmPdspCtrlReg[0] = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ ((uint32_t)CSL_QM_SS_CFG_ADSP1_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);
+ nwalTest_qmssGblCfgParams.qmPdspCtrlReg[1] = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ ((uint32_t)CSL_QM_SS_CFG_ADSP2_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);
+ nwalTest_qmssGblCfgParams.qmPdspIRamReg[0] = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ ((uint32_t)CSL_QM_SS_CFG_APDSP1_RAM_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);
+ nwalTest_qmssGblCfgParams.qmPdspIRamReg[1] = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ ((uint32_t)CSL_QM_SS_CFG_APDSP2_RAM_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);
+ nwalTest_qmssGblCfgParams.qmStatusRAM = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ ((uint32_t)CSL_QM_SS_CFG_QM_STATUS_RAM_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);
+ nwalTest_qmssGblCfgParams.qmLinkingRAMReg = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ ((uint32_t)CSL_QM_SS_CFG_LINKING_RAM_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);
+ nwalTest_qmssGblCfgParams.qmMcDMAReg = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ ((uint32_t)CSL_QM_SS_CFG_MCDMA_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);
+ nwalTest_qmssGblCfgParams.qmTimer16Reg[0] = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ ((uint32_t)CSL_QM_SS_CFG_TIMER1_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);
+ nwalTest_qmssGblCfgParams.qmTimer16Reg[1] = (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ ((uint32_t)CSL_QM_SS_CFG_TIMER2_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS);
+ nwalTest_qmssGblCfgParams.qmQueMgmtDataReg = (void *)((uint32_t)netapi_VM_qmssDataVaddr);
+ nwalTest_qmssGblCfgParams.qmQueMgmtProxyDataReg =
+ (void *)((uint32_t)netapi_VM_qmssDataVaddr + ((uint32_t)(0x44040000) - (uint32_t)(0x44020000)));
+
+ result = Qmss_init (&qmssInitConfig, &nwalTest_qmssGblCfgParams);
+ if (result != QMSS_SOK) {
+ System_printf (">function init_qm: qmss_Init failed with error code %d\n", result);
+ return (nwal_FALSE);
+ }
+
+ return 1;
+}
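Every register override above follows one pattern: take the offset of a register block from the physical start of the mmap'd window (CSL_QM_SS_CFG_QUE_PEEK_REGS for the QMSS config space) and add it to the user-space virtual base returned by netapi_VM_memMap(). A minimal sketch of that rebasing step, written as a hypothetical helper that is not part of the NETAPI sources:

/* Hypothetical helper mirroring the arithmetic used in netapi_init_qm()
 * and netapi_init_cppi(): rebase a physical register address into a
 * user-space mapping that begins at virtBase and covers the physical
 * window starting at physWindowBase. */
static inline void *netapi_rebase_reg(void *virtBase,
                                      uint32_t physWindowBase,
                                      uint32_t physReg)
{
    return (void *)((uint8_t *)virtBase + (physReg - physWindowBase));
}

/* Equivalent to the qmDescReg assignment above:
 *   nwalTest_qmssGblCfgParams.qmDescReg =
 *       netapi_rebase_reg(netapi_VM_qmssCfgVaddr,
 *                         CSL_QM_SS_CFG_QUE_PEEK_REGS,
 *                         CSL_QM_SS_CFG_DESCRIPTION_REGS);
 */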
+
+//****************************************************
+// Set up QM memory region (per SOC)
+//***************************************************
+int netapi_qm_setup_mem_region(
+ uint32_t numDesc,
+ uint32_t descSize,
+ uint32_t* pDescMemBase,
+ Qmss_MemRegion memRegion)
+{
+ Qmss_MemRegInfo memInfo;
+ Int32 result;
+ Int n;
+ static int netapi_qm_region_index=0;
+
+ memset(&memInfo,0,sizeof(Qmss_MemRegInfo));
+ memInfo.descBase = pDescMemBase;
+ memInfo.descSize = descSize;
+ memInfo.descNum = numDesc;
+ memInfo.manageDescFlag = Qmss_ManageDesc_MANAGE_DESCRIPTOR;
+ memInfo.memRegion = memRegion;
+
+ if(memRegion == NETAPI_GLOBAL_REGION)
+ {
+ memInfo.startIndex = TUNE_NETAPI_QM_START_INDEX; //was 0
+ netapi_qm_region_index += numDesc;
+ }else if(memRegion ==NETAPI_LOCAL_REGION)
+ {
+ /* 2nd region for descriptors (perhaps private?) */
+ memInfo.startIndex = netapi_qm_region_index;
+ }
+ else
+ {
+ return -1 ;
+ }
+
+ memset (pDescMemBase, 0, (descSize * numDesc));
+
+ result = Qmss_insertMemoryRegion (&memInfo);
+ if (result < QMSS_SOK)
+ {
+ printf (">function setup_qm_region: Qmss_insertMemoryRegion returned error code %d\n", result);
+ return (-1);
+ }
+
+ return 1;
+
+}
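For illustration, a sketch of how the two descriptor regions might be registered, assuming the global and local descriptor memories have already been carved out of the shared area; the descriptor counts (num_glob_desc, num_loc_desc) are placeholders, not NETAPI symbols:

/* Illustrative only: register the global (shared) descriptor region first,
 * then the second region; NETAPI_GLOBAL_REGION / NETAPI_LOCAL_REGION are
 * defined in netapi_loc.h. */
if (netapi_qm_setup_mem_region(num_glob_desc, TUNE_NETAPI_DESC_SIZE,
                               (uint32_t *)QMemGlobDescRam,
                               NETAPI_GLOBAL_REGION) < 0)
    return -1;
if (netapi_qm_setup_mem_region(num_loc_desc, TUNE_NETAPI_DESC_SIZE,
                               (uint32_t *)QMemLocDescRam,
                               NETAPI_LOCAL_REGION) < 0)
    return -1;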
+
+//****************************************************
+// Start QM (per thread)
+//***************************************************
+int netapi_start_qm(void)
+{
+ int32_t result;
+ result = Qmss_start();
+ if (result != QMSS_SOK)
+ {
+ System_printf (">start_qm: Qmss_start failed with error code %d\n", result);
+ return (-1);
+ }
+ return 1;
+}
+
+//*************************************************
+//initialize CPPI (once per SOC)
+//*************************************************
+int netapi_init_cppi(void)
+{
+ int32_t result, i;
+ Cppi_GlobalConfigParams nwalTest_cppiGblCfgParams[CPPI_MAX_CPDMA];
+
+ for (i=0; i<CPPI_MAX_CPDMA; i++)
+ nwalTest_cppiGblCfgParams[i] = cppiGblCfgParams[i];
+
+ /* SRIO CPDMA regs */
+ nwalTest_cppiGblCfgParams[Cppi_CpDma_SRIO_CPDMA].gblCfgRegs =
+ (void *)((uint32_t)netapi_VM_srioCfgVaddr +
+ (((uint32_t)CSL_SRIO_CONFIG_CPPI_DMA_GLOBAL_CFG_REGS) - (uint32_t)CSL_SRIO_CONFIG_REGS));
+ nwalTest_cppiGblCfgParams[Cppi_CpDma_SRIO_CPDMA].txChRegs =
+ (void *)((uint32_t)netapi_VM_srioCfgVaddr +
+ (((uint32_t)CSL_SRIO_CONFIG_CPPI_DMA_TX_CFG_REGS) - (uint32_t)CSL_SRIO_CONFIG_REGS));
+
+ nwalTest_cppiGblCfgParams[Cppi_CpDma_SRIO_CPDMA].rxChRegs =
+ (void *)((uint32_t)netapi_VM_srioCfgVaddr +
+ (((uint32_t)CSL_SRIO_CONFIG_CPPI_DMA_RX_CFG_REGS) - (uint32_t)CSL_SRIO_CONFIG_REGS));
+ nwalTest_cppiGblCfgParams[Cppi_CpDma_SRIO_CPDMA].txSchedRegs =
+ (void *)((uint32_t)netapi_VM_srioCfgVaddr +
+ (((uint32_t)CSL_SRIO_CONFIG_CPPI_DMA_TX_SCHEDULER_CFG_REGS) - (uint32_t)CSL_SRIO_CONFIG_REGS));
+ nwalTest_cppiGblCfgParams[Cppi_CpDma_SRIO_CPDMA].rxFlowRegs =
+ (void *)((uint32_t)netapi_VM_srioCfgVaddr +
+ (((uint32_t)CSL_SRIO_CONFIG_CPPI_DMA_RX_FLOW_CFG_REGS) - (uint32_t)CSL_SRIO_CONFIG_REGS));
+
+ /* PASS CPDMA regs */
+ nwalTest_cppiGblCfgParams[Cppi_CpDma_PASS_CPDMA].gblCfgRegs =
+ (void *)((uint32_t)netapi_VM_passCfgVaddr +
+ (((uint32_t)CSL_PA_SS_CFG_CPPI_DMA_GLOBAL_CFG_REGS) - (uint32_t)CSL_PA_SS_CFG_REGS));
+ nwalTest_cppiGblCfgParams[Cppi_CpDma_PASS_CPDMA].txChRegs =
+ (void *)((uint32_t)netapi_VM_passCfgVaddr +
+ (((uint32_t)CSL_PA_SS_CFG_CPPI_DMA_TX_CFG_REGS) - (uint32_t)CSL_PA_SS_CFG_REGS));
+ nwalTest_cppiGblCfgParams[Cppi_CpDma_PASS_CPDMA].rxChRegs =
+ (void *)((uint32_t)netapi_VM_passCfgVaddr +
+ (((uint32_t)CSL_PA_SS_CFG_CPPI_DMA_RX_CFG_REGS) - (uint32_t)CSL_PA_SS_CFG_REGS));
+ nwalTest_cppiGblCfgParams[Cppi_CpDma_PASS_CPDMA].txSchedRegs =
+ (void *)((uint32_t)netapi_VM_passCfgVaddr +
+ (((uint32_t)CSL_PA_SS_CFG_CPPI_DMA_TX_SCHEDULER_CFG_REGS) - (uint32_t)CSL_PA_SS_CFG_REGS));
+ nwalTest_cppiGblCfgParams[Cppi_CpDma_PASS_CPDMA].rxFlowRegs =
+ (void *)((uint32_t)netapi_VM_passCfgVaddr +
+ (((uint32_t)CSL_PA_SS_CFG_CPPI_DMA_RX_FLOW_CFG_REGS) - (uint32_t)CSL_PA_SS_CFG_REGS));
+ /* QMSS CPDMA regs */
+ nwalTest_cppiGblCfgParams[Cppi_CpDma_QMSS_CPDMA].gblCfgRegs =
+ (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ (((uint32_t)CSL_QM_SS_CFG_CPPI_DMA_GLOBAL_CFG_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS));
+ nwalTest_cppiGblCfgParams[Cppi_CpDma_QMSS_CPDMA].txChRegs =
+ (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ (((uint32_t)CSL_QM_SS_CFG_CPPI_DMA_TX_CFG_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS));
+ nwalTest_cppiGblCfgParams[Cppi_CpDma_QMSS_CPDMA].rxChRegs =
+ (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ (((uint32_t)CSL_QM_SS_CFG_CPPI_DMA_RX_CFG_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS));
+ nwalTest_cppiGblCfgParams[Cppi_CpDma_QMSS_CPDMA].txSchedRegs =
+ (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ (((uint32_t)CSL_QM_SS_CFG_CPPI_DMA_TX_SCHEDULER_CFG_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS));
+ nwalTest_cppiGblCfgParams[Cppi_CpDma_QMSS_CPDMA].rxFlowRegs =
+ (void *)((uint32_t)netapi_VM_qmssCfgVaddr +
+ (((uint32_t)CSL_QM_SS_CFG_CPPI_DMA_RX_FLOW_CFG_REGS) - (uint32_t)CSL_QM_SS_CFG_QUE_PEEK_REGS));
+
+ result = Cppi_init (nwalTest_cppiGblCfgParams);
+ if (result != CPPI_SOK)
+ {
+ printf (">function cppi_init: Cppi_init failed with error code %d\n", result);
+ return (-1);
+ }
+ return 1;
+}
+
+//*************************************************
+//initialize NWAL (one per soc)
+//*************************************************
+/*** NWAL Memory Buffer Configuration ***/
+#define NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_HANDLE 3400
+uint8_t nwalInstMem[NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_HANDLE]ALIGN(CACHE_LINESZ);
+
+#define NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_MAC 128
+#define NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_IPSEC_HANDLE_PER_CHAN 256
+#define NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_IP 128
+#define NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_PORT 128
+#define NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_L2L3_HDR 128
+#define NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_LOC_CONTEXT 384
+#define NWAL_CHAN_HANDLE_SIZE ((NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_MAC * TUNE_NETAPI_MAX_NUM_MAC) + \
+ (NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_IPSEC_HANDLE_PER_CHAN * TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS*2) + \
+ (NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_IP * TUNE_NETAPI_MAX_NUM_IP) + \
+ (NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_PORT * TUNE_NETAPI_MAX_NUM_PORTS)+ \
+ (NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_LOC_CONTEXT * TUNE_NETAPI_NUM_CORES) + \
+ (NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_PER_L2L3_HDR * TUNE_NETAPI_MAX_NUM_L2_L3_HDRS))
+
+uint8_t nwalHandleMem[NWAL_CHAN_HANDLE_SIZE]ALIGN(CACHE_LINESZ);
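To make the sizing macro concrete, a worked example with purely hypothetical tuning values (the real TUNE_* constants come from the NETAPI tuning header and will differ):

/* Hypothetical example:
 *   TUNE_NETAPI_MAX_NUM_MAC            =  8  ->  8 * 128     = 1024
 *   TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS = 16  -> 16 * 2 * 256 = 8192
 *   TUNE_NETAPI_MAX_NUM_IP             = 16  -> 16 * 128     = 2048
 *   TUNE_NETAPI_MAX_NUM_PORTS          = 16  -> 16 * 128     = 2048
 *   TUNE_NETAPI_NUM_CORES              =  4  ->  4 * 384     = 1536
 *   TUNE_NETAPI_MAX_NUM_L2_L3_HDRS     = 16  -> 16 * 128     = 2048
 *   NWAL_CHAN_HANDLE_SIZE = 1024+8192+2048+2048+1536+2048 = 16896 bytes
 */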
+
+/* todo: Check if below size information can be made available from pa interface file */
+#define NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF0 128
+/* PA instance */
+/* Memory used for the PA Instance. Needs to be assigned global uncached memory for chip */
+uint8_t paBuf0[NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF0]ALIGN(CACHE_LINESZ);
+
+/* Memory used for PA handles */
+#define NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF1 128
+uint8_t paBuf1[NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF1]ALIGN(CACHE_LINESZ);
+
+#define NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF2 768
+uint8_t paBuf2[NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF2]ALIGN(CACHE_LINESZ);
+
+/* Memory used for SA LLD global Handle */
+#define NETAPI_NWAL_CONFIG_BUFSIZE_SA_LLD_HANDLE 384
+uint8_t salldHandle[NETAPI_NWAL_CONFIG_BUFSIZE_SA_LLD_HANDLE]ALIGN(CACHE_LINESZ);
+
+#if 0 //need to alloc this since we need phy addr also
+/* Memory used for SA context Handle */
+#define NETAPI_NWAL_CONFIG_BUFSIZE_SA_CONTEXT_PER_CHAN 384
+uint8_t saContext[NETAPI_NWAL_CONFIG_BUFSIZE_SA_CONTEXT_PER_CHAN * TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS]ALIGN(CACHE_LINESZ);
+#endif
+
+/* Memory used by SA LLD per Channel */
+#define NETAPI_NWAL_CONFIG_BUFSIZE_SA_LLD_HANDLE_PER_CHAN 512
+uint8_t salldChanHandle[NETAPI_NWAL_CONFIG_BUFSIZE_SA_LLD_HANDLE_PER_CHAN * TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS*2]ALIGN(CACHE_LINESZ);
+
+
+/*******************************************
+ * Initialize the nwal subsystem for NETAPI use
+ ***********************************************/
+int netapi_init_nwal(
+ int region2use,
+ Pktlib_HeapIfTable * p_table,
+ NETAPI_NWAL_GLOBAL_CONTEXT_T * p_nwal_context,
+ NETAPI_CFG_T*p_cfg )
+{
+ nwalSizeInfo_t nwalSizeInfo;
+ nwalMemBuf_t nwalMemBuf[nwal_N_BUFS];
+ nwal_RetValue nwalRetVal;
+ nwalGlobCfg_t nwalGlobCfg;
+ uint8_t count;
+ int sizes[nwal_N_BUFS];
+ int aligns[nwal_N_BUFS];
+ void* bases[nwal_N_BUFS];
+ Pktlib_HeapCfg heapCfg;
+ int32_t errCode;
+
+ memset(p_nwal_context,0,sizeof( NETAPI_NWAL_GLOBAL_CONTEXT_T) );
+ memset(&nwalGlobCfg,0,sizeof(nwalGlobCfg_t ) );
+
+
+ /* Initialize Buffer Pool for NetCP PA to SA packets */
+ nwalGlobCfg.pa2SaBufPool.numBufPools = 1;
+ nwalGlobCfg.pa2SaBufPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;
+ nwalGlobCfg.pa2SaBufPool.bufPool[0].bufSize = p_cfg->def_heap_buf_size;
+
+ /* Initialize the heap configuration. */
+ memset ((void *)&heapCfg, 0, sizeof(Pktlib_HeapCfg));
+ /* Populate the heap configuration */
+ heapCfg.name = "nwal PA2SA";
+ heapCfg.memRegion = region2use;
+ heapCfg.sharedHeap = 0;
+ heapCfg.useStarvationQueue = 0;
+ heapCfg.dataBufferSize = p_cfg->def_heap_buf_size;
+ heapCfg.numPkts = TUNE_NETAPI_CONFIG_MAX_PA_TO_SA_DESC;
+ heapCfg.numZeroBufferPackets= 0;
+ heapCfg.heapInterfaceTable.data_malloc = p_table->data_malloc;
+ heapCfg.heapInterfaceTable.data_free = p_table->data_free;
+ heapCfg.dataBufferPktThreshold = 0;
+ heapCfg.zeroBufferPktThreshold = 0;
+
+
+ nwalGlobCfg.pa2SaBufPool.bufPool[0].heapHandle = p_nwal_context->pa2sa_heap=
+ Pktlib_createHeap(&heapCfg, &errCode);
+ if(nwalGlobCfg.pa2SaBufPool.bufPool[0].heapHandle == NULL)
+ {
+ printf (">Pktlib_createHeap:Heap Creation Failed for PA to SA Buffer Pool , Error Code: %d\n",errCode);
+ netapi_err_teardown();
+ return -1;
+ }
+
+ /* Initialize Buffer Pool for NetCP SA to PA packets */
+ nwalGlobCfg.sa2PaBufPool.numBufPools = 1;
+ nwalGlobCfg.sa2PaBufPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;
+ nwalGlobCfg.sa2PaBufPool.bufPool[0].bufSize = p_cfg->def_heap_buf_size;
+
+ /* Populate the heap configuration */
+ heapCfg.name = "nwal SA2PA";
+ heapCfg.numPkts = TUNE_NETAPI_CONFIG_MAX_SA_TO_PA_DESC;
+
+ nwalGlobCfg.sa2PaBufPool.bufPool[0].heapHandle = p_nwal_context->sa2pa_heap=
+ Pktlib_createHeap(&heapCfg, &errCode);
+ if(nwalGlobCfg.sa2PaBufPool.bufPool[0].heapHandle == NULL)
+ {
+ printf (">Pktlib_createHeap:Heap Creation Failed for SA to PA Buffer Pool , Error Code: %d\n",errCode);
+ netapi_err_teardown();
+ return -1;
+ }
+
+ nwalGlobCfg.hopLimit = 5;/* Default TTL / Hop Limit */
+ nwalGlobCfg.paPowerOn = nwal_TRUE;
+ nwalGlobCfg.saPowerOn = nwal_TRUE;
+ nwalGlobCfg.paFwActive = nwal_TRUE;
+ nwalGlobCfg.saFwActive = nwal_FALSE;
+
+    /* Derive PA and SA virtual base addresses from the mapped PASS config region */
+ nwalGlobCfg.paVirtBaseAddr = (uint32_t) netapi_VM_passCfgVaddr;
+ nwalGlobCfg.saVirtBaseAddr = (uint32_t) netapi_VM_passCfgVaddr +
+ ((uint32_t)CSL_PA_SS_CFG_CP_ACE_CFG_REGS - (uint32_t)CSL_PA_SS_CFG_REGS) ;
+ nwalGlobCfg.rxDefPktQ = QMSS_PARAM_NOT_SPECIFIED;
+
+ /* Get the Buffer Requirement from NWAL */
+ memset(&nwalMemBuf,0,sizeof(nwalMemBuf));
+ memset(&nwalSizeInfo,0,sizeof(nwalSizeInfo));
+ nwalSizeInfo.nMaxMacAddress = TUNE_NETAPI_MAX_NUM_MAC;
+ nwalSizeInfo.nMaxIpAddress = TUNE_NETAPI_MAX_NUM_IP;
+ nwalSizeInfo.nMaxL4Ports = TUNE_NETAPI_MAX_NUM_PORTS;
+ nwalSizeInfo.nMaxIpSecChannels = TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS;//we allocate 2 per channel
+ nwalSizeInfo.nMaxDmSecChannels = TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS;//we allocate 2 per channel
+ nwalSizeInfo.nMaxL2L3Hdr = TUNE_NETAPI_MAX_NUM_L2_L3_HDRS;
+ nwalSizeInfo.nProc = TUNE_NETAPI_NUM_CORES;
+ for(count=0;count < nwal_N_BUFS;count++)
+ {
+ nwalMemBuf[count].cacheLineSize = CACHE_LINESZ;
+ }
+ nwalRetVal = nwal_getBufferReq(&nwalSizeInfo,
+ sizes,
+ aligns);
+ if(nwalRetVal != nwal_OK)
+ {
+ printf (">netapi: init_nwal - nwal_getBufferReq Failed %d\n", nwalRetVal);
+ return nwal_FALSE;
+ }
+
+/* Check for memory size requirement and update the base */
+ count = 0;
+ bases[nwal_BUF_INDEX_INST] = (uint32_t *)Osal_nwalLocToGlobAddr((uint32_t)nwalInstMem);
+ if(NETAPI_NWAL_CONFIG_BUFSIZE_NWAL_HANDLE < sizes[nwal_BUF_INDEX_INST])
+ {
+ /* Resize Memory */
+ while(1);
+ }
+ count++;
+
+ bases[nwal_BUF_INDEX_INT_HANDLES] = (uint32_t *)Osal_nwalLocToGlobAddr((uint32_t)nwalHandleMem);
+ if(NWAL_CHAN_HANDLE_SIZE < sizes[nwal_BUF_INDEX_INT_HANDLES])
+ {
+ /* Resize Memory */
+ while(1);
+ }
+ count++;
+ bases[nwal_BUF_INDEX_PA_LLD_BUF0] = (uint32_t *)Osal_nwalLocToGlobAddr((uint32_t)paBuf0);
+ if((NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF0) < sizes[nwal_BUF_INDEX_PA_LLD_BUF0])
+ {
+ /* Resize Memory */
+ while(1);
+ }
+ count++;
+
+ bases[nwal_BUF_INDEX_PA_LLD_BUF1] = (uint32_t *)Osal_nwalLocToGlobAddr((uint32_t)paBuf1);
+ if((NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF1) < sizes[nwal_BUF_INDEX_PA_LLD_BUF1])
+ {
+ /* Resize Memory */
+ while(1);
+ }
+ count++;
+
+ bases[nwal_BUF_INDEX_PA_LLD_BUF2] = (uint32_t *)Osal_nwalLocToGlobAddr((uint32_t)paBuf2);
+ if((NETAPI_NWAL_CONFIG_BUFSIZE_PA_BUF2) < sizes[nwal_BUF_INDEX_PA_LLD_BUF2])
+ {
+ /* Resize Memory */
+ while(1);
+ }
+ count++;
+#ifdef NETAPI_ENABLE_SECURITY
+ bases[nwal_BUF_INDEX_SA_LLD_HANDLE] = (uint32_t *)Osal_nwalLocToGlobAddr((uint32_t)salldHandle);
+ if((NETAPI_NWAL_CONFIG_BUFSIZE_SA_LLD_HANDLE) < sizes[nwal_BUF_INDEX_SA_LLD_HANDLE])
+ {
+ /* Resize Memory */
+ while(1);
+ }
+ count++;
+
+ bases[nwal_BUF_INDEX_SA_CONTEXT] = (uint32_t *)Osal_nwalLocToGlobAddr((uint32_t)netapi_VM_SaContextVaddr);
+ count++;
+
+ bases[nwal_BUF_INDEX_SA_LLD_CHAN_HANDLE] = (uint32_t *)Osal_nwalLocToGlobAddr((uint32_t)salldChanHandle);
+ if((NETAPI_NWAL_CONFIG_BUFSIZE_SA_LLD_HANDLE_PER_CHAN * TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS*2) <
+ sizes[nwal_BUF_INDEX_SA_LLD_CHAN_HANDLE])
+ {
+ /* Resize Memory */
+ while(1);
+ }
+ count++;
+#else
+ bases[nwal_BUF_INDEX_SA_LLD_HANDLE] = 0;
+ bases[nwal_BUF_INDEX_SA_CONTEXT] = 0;
+ bases[nwal_BUF_INDEX_SA_LLD_CHAN_HANDLE] = 0;
+ count = count+3;
+#endif
+ if(count != nwal_N_BUFS)
+ {
+ while(1);
+ }
+
+ /* Initialize NWAL module */
+ nwalRetVal = nwal_create(&nwalGlobCfg,
+ &nwalSizeInfo,
+ sizes,
+ bases,
+ &p_nwal_context->nwalInstHandle);
+ if(nwalRetVal != nwal_OK)
+ {
+ printf (">netapi: init_nwal- nwal_create Failed %d\n",nwalRetVal);
+ while(1);
+ }
+
+ printf(">netapi: init_nwal - Global and Local Network initialization Successful \n");
+ return 1;
+}
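A minimal calling sketch for the global bring-up above, assuming descriptors live in the global memory region; my_data_malloc, my_data_free and cfg are application-supplied placeholders:

/* Illustrative only: wire a Pktlib heap interface table to the
 * application's buffer allocator and run the one-time NWAL init. */
NETAPI_CFG_T cfg;                     /* def_heap_buf_size etc. filled in by the app */
Pktlib_HeapIfTable ifTable;
memset(&ifTable, 0, sizeof(ifTable));
ifTable.data_malloc = my_data_malloc; /* placeholder allocator */
ifTable.data_free   = my_data_free;   /* placeholder free      */

if (netapi_init_nwal(NETAPI_GLOBAL_REGION, &ifTable,
                     &netapi_get_global()->nwal_context, &cfg) < 0)
    return -1;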
+
+//*************************************************
+//* Local (per thread/core) nwal initialization
+//**************************************************
+int netapi_start_nwal(Pktlib_HeapHandle pkt_heap,
+ Pktlib_HeapHandle cmd_heap,
+ NETAPI_NWAL_LOCAL_CONTEXT_T *p,
+ NETAPI_CFG_T *p_cfg,
+ NETAPI_NWAL_GLOBAL_CONTEXT_T * p_nwal_glob_context )
+{
+ nwalLocCfg_t nwalLocCfg;
+    int count = 0;    /* must start at 0: used as the loop index below */
+ nwal_RetValue nwalRetVal;
+
+ memset(&nwalLocCfg,0,sizeof(nwalLocCfg));
+
+ /* Common Initialization for all cores */
+ while(count < TUNE_NETAPI_MAX_NUM_TRANS)
+ {
+ p_nwal_glob_context->transInfos[count].transId = count;
+ count++;
+ }
+
+ /* Update the Start of Packet Offset for the default flows created
+ * by NWAL
+ */
+ nwalLocCfg.rxSopPktOffset = p_cfg->def_flow_pkt_rx_offset;
+
+ /* Call back registration for the core */
+ nwalLocCfg.pRxPktCallBack = netapi_NWALRxPktCallback;
+ nwalLocCfg.pCmdCallBack = netapi_NWALCmdCallBack;
+ nwalLocCfg.pPaStatsCallBack = netapi_NWALCmdPaStatsReply;
+ nwalLocCfg.pRxDmCallBack= netapi_NWALSBPktCallback; //sideband mode callback
+
+ /* Initialize Buffer Pool for Control packets from NetCP to Host */
+ nwalLocCfg.rxCtlPool.numBufPools = 1;
+ nwalLocCfg.rxCtlPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;
+ nwalLocCfg.rxCtlPool.bufPool[0].bufSize = TUNE_NETAPI_CONFIG_MAX_CTL_RXTX_BUF_SIZE;
+ nwalLocCfg.rxCtlPool.bufPool[0].heapHandle = cmd_heap;
+
+ /* Initialize Buffer Pool for Control packets from Host to NetCP */
+ nwalLocCfg.txCtlPool.numBufPools = 1;
+ nwalLocCfg.txCtlPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;
+ nwalLocCfg.txCtlPool.bufPool[0].bufSize = TUNE_NETAPI_CONFIG_MAX_CTL_RXTX_BUF_SIZE;
+ nwalLocCfg.txCtlPool.bufPool[0].heapHandle = cmd_heap;
+
+    /* Initialize Buffer Pool for Packets from NetCP to Host */
+ nwalLocCfg.rxPktPool.numBufPools = 1;
+ nwalLocCfg.rxPktPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;
+ nwalLocCfg.rxPktPool.bufPool[0].bufSize = TUNE_NETAPI_DEFAULT_BUFFER_SIZE;
+ nwalLocCfg.rxPktPool.bufPool[0].heapHandle = pkt_heap;
+
+    /* Initialize Buffer Pool for Packets from Host to NetCP */
+ nwalLocCfg.txPktPool.numBufPools = 1;
+ nwalLocCfg.txPktPool.bufPool[0].descSize = TUNE_NETAPI_DESC_SIZE;
+ nwalLocCfg.txPktPool.bufPool[0].bufSize = TUNE_NETAPI_DEFAULT_BUFFER_SIZE;
+ nwalLocCfg.txPktPool.bufPool[0].heapHandle = pkt_heap;
+
+ memcpy(&p->nwalLocCfg,&nwalLocCfg,sizeof(nwalLocCfg_t));
+ while(1)
+ {
+ nwalRetVal = nwal_start(p_nwal_glob_context->nwalInstHandle,&nwalLocCfg);
+ if(nwalRetVal == nwal_ERR_INVALID_STATE)
+ {
+ continue;
+ }
+ break;
+ }
+
+ if(nwalRetVal != nwal_OK)
+ {
+ printf (">nwal_start:Failed ->err %d !!!\n", nwalRetVal);
+ return -1;
+ }
+ p->state = NETAPI_NW_CXT_LOC_ACTIVE;
+ return 1;
+
+
+}
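For context, a sketch of the per-thread attach order implied by the code above; my_pkt_heap, my_cmd_heap, handle and cfg are placeholders for objects the NETAPI setup code owns:

/* Illustrative only: attach this thread to QMSS first, then to the
 * shared NWAL instance created by netapi_init_nwal(). */
if (netapi_start_qm() < 0)
    return -1;
if (netapi_start_nwal(my_pkt_heap, my_cmd_heap,
                      &handle->nwal_local, &cfg,
                      &netapi_get_global()->nwal_context) < 0)
    return -1;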
+//***************************************************
+// initialize timer
+//***************************************************
+int netapi_init_timer(void)
+{
+ return t64_start();
+}
+
+
index b7244aa7760fe9a2eed809df884297ba343b041d..23615ee9df24439d612e897f138ed69df199c428 100755 (executable)
-/*****************************************\r
- * file: netapi_loc.h\r
- * purpose: internal netapi stuff\r
- ****************************************/\r
-\r
-#ifndef __NETAPI_LOC__H\r
-#define __NETAPI_LOC__H\r
-/***************************************\r
-* INTERNAL HANDLE STRUCTURE DEFINITION\r
-****************************************/\r
-\r
-/***********************************************\r
- * GLOBAL AREA\r
- * short term: this is global to process\r
- * (multi-process not supported)\r
- * long term: this structure gets put in shared memory \r
- ***********************************************/\r
-\r
-/* list of global pktio channels that have been created\r
- (NETCP_TX, RX are intrinsic so won't be here) */\r
-typedef struct PKTIO_ENTRY_tag\r
-{\r
- char name[PKTIO_MAX_NAME+1];\r
- Qmss_Queue qn; // -1 => slot is free\r
-} PKTIO_ENTRY_T;\r
-\r
-/* to hold an IP on an interface */\r
-typedef struct NETCP_INTERFACE_IP_Tag\r
-{\r
- int in_use;\r
- void * nwal_handle;\r
- nwal_IpType ip_type;\r
- nwalIpAddr_t ip_addr;\r
- nwalIpOpt_t ip_qualifiers;\r
-} NETCP_INTERFACE_IP_T;\r
-\r
-/* to hold a classifier */\r
-typedef struct NETCP_INTERFACE_CLASSIFIER_Tag\r
-{\r
- int in_use;\r
- int class_type; //see netcp_cfg.h\r
- void * nwal_L2_handle;\r
- void * nwal_L3_handle;\r
- void * nwal_L4_handle;\r
-} NETCP_INTERFACE_CLASSIFIER_T;\r
-\r
-/* to hold an ipsec rx policy */\r
-typedef struct NETCP_IPSEC_POLICY_Tag\r
-{\r
- int in_use;\r
- int tunnel; //associated tunnel\r
- void * nwal_handle; //handle associated with this RX Policy \r
-} NETCP_IPSEC_POLICY_T;\r
-\r
-/* to hold a tunnel */\r
-typedef struct NETCP_IPSEC_SA_Tag\r
-{\r
- int in_use;\r
- int inbound; //true if inbound\r
- \r
- int sa_mode; //mode we are going to use \r
-#define NETCP_IPSEC_SA_MODE_INFLOW 0\r
-#define NETCP_IPSEC_SA_MODE_SIDEBAND 1\r
- \r
- void * sa_handle_inflow; //for inflow mode\r
- void * sa_handle_sideband; //for sideband mode\r
- int iface; //associated interface\r
-} NETCP_IPSEC_SA_T;\r
-\r
-/* to hold a netcp 'interface' */\r
-typedef struct NETCP_INTERFACE_Tag\r
-{\r
- int in_use; /* 1 for valid */\r
- int state; /* 0=down, 1=up, future.. */\r
- void * nwal_handle; //handle associated with this interface\r
- unsigned char mac[6]; // mac address\r
- unsigned int vlan; //future\r
-} NETCP_INTERFACE_T;\r
-\r
-/*to keep track of netcp config transactions */\r
-typedef struct {\r
- nwal_Bool_t inUse;\r
- uint16_t transType;\r
-#define NETAPI_NWAL_HANDLE_TRANS_NONE 0\r
-#define NETAPI_NWAL_HANDLE_TRANS_MAC 1\r
-#define NETAPI_NWAL_HANDLE_TRANS_IP 2\r
-#define NETAPI_NWAL_HANDLE_TRANS_PORT 3\r
-#define NETAPI_NWAL_HANDLE_TRANS_SA 4\r
-#define NETAPI_NWAL_HANDLE_TRANS_SA_POLICY 5\r
-#define NETAPI_NWAL_HANDLE_STAT_REQUEST 6\r
-\r
- uint16_t state;\r
-#define NETAPI_NWAL_HANDLE_STATE_IDLE 0\r
-#define NETAPI_NWAL_HANDLE_STATE_OPEN_PENDING 1\r
-#define NETAPI_NWAL_HANDLE_STATE_OPEN 2\r
-#define NETAPI_NWAL_HANDLE_STATE_CLOSE_PENDING 3\r
- nwal_Handle handle;\r
- uint64_t transId;\r
- NETAPI_T netapi_handle; //the thread making the transaction\r
-} NetapiNwalTransInfo_t;\r
-\r
-/* to hold user defined flows */\r
-typedef struct NETCP_REGISTERED_FLOWS_Tag\r
-{\r
- int in_use; //1=> in use\r
- void * handle; //cppi handle to resource (internal)\r
- NETCP_CFG_FLOW_T flow;\r
-} NETCP_REGISTERED_FLOWS_T;\r
-\r
-/******************************\r
- * nwal global context \r
- * (shared over all instances)\r
- *******************************/\r
-typedef struct\r
-{\r
- int state;\r
-#define NETAPI_NW_CXT_GLOB_INACTIVE 0x0\r
-#define NETAPI__CXT_GLOB_ACTIVE 0x1\r
-#define NETAPI_NW_CXT_GLOB_RES_ALLOC_COMPLETE 0x3\r
-\r
- nwal_Handle nwalInstHandle;\r
- //internal heaps used just by netcp (sa<->pa). SW doesn't touch these\r
- Pktlib_HeapHandle sa2pa_heap;\r
- Pktlib_HeapHandle pa2sa_heap;\r
- //stats\r
- paSysStats_t paStats;\r
- int numCmdPass;\r
- int numCmdFail;\r
- int numBogusTransIds;\r
- NetapiNwalTransInfo_t transInfos[TUNE_NETAPI_MAX_NUM_TRANS]; //transactions\r
- NETCP_INTERFACE_T interfaces[TUNE_NETAPI_MAX_INTERFACES]; //interfaces\r
- NETCP_INTERFACE_CLASSIFIER_T classi[TUNE_NETAPI_MAX_CLASSIFIERS]; //classifiers\r
- NETCP_REGISTERED_FLOWS_T flows[TUNE_NETAPI_MAX_FLOWS]; //flows\r
- NETCP_IPSEC_SA_T tunnel[TUNE_NETAPI_MAX_SA]; //tunnels\r
- NETCP_IPSEC_POLICY_T policy[TUNE_NETAPI_MAX_POLICY]; //policies\r
- NETCP_INTERFACE_IP_T ips[TUNE_NETAPI_MAX_IP]; //ips\r
-} NETAPI_NWAL_GLOBAL_CONTEXT_T;\r
-\r
-/* NWAL Local context (per core/thread) */\r
-typedef struct\r
-{\r
- //void * nwalLocInstance;\r
-#define NETAPI_NW_CXT_LOC_INACTIVE 0x0\r
-#define NETAPI_NW_CXT_LOC_ACTIVE 0x2\r
- int state;\r
-\r
- int numPendingCfg;\r
- NETCP_CFG_STATS_CB stats_cb;\r
-\r
-/* stats */\r
- int numL2PktsRecvd;\r
- int numL3PktsRecvd;\r
- int numL4PktsRecvd;\r
- int numL4PktsSent;\r
- int TxErrDrop;\r
-\r
- /* local config */\r
- nwalLocCfg_t nwalLocCfg;\r
-} NETAPI_NWAL_LOCAL_CONTEXT_T;\r
-\r
-/* the global */\r
-typedef struct NETAPI_GLOBAL_tag\r
-{\r
-#define NETAPI_MAX_PKTIO (TUNE_NETAPI_MAX_PKTIO) \r
-PKTIO_ENTRY_T pktios[NETAPI_MAX_PKTIO];\r
-\r
-/* configuration */\r
-NETAPI_CFG_T cfg;\r
-\r
-/* global timers */\r
-\r
-/* nwal context */\r
-NETAPI_NWAL_GLOBAL_CONTEXT_T nwal_context;\r
-\r
-} NETAPI_GLOBAL_T;\r
-\r
-\r
-/************************************\r
- * this is a per thread structure.\r
- * It contains stuff local to thread\r
- * and pointer to global stuff\r
- * that is shared over all threads,\r
- **************************************/\r
-typedef struct NETAPI_HANDLE_Tag\r
-{\r
-int master; //master type\r
-\r
-void * global; /* pointer to the global area */\r
-\r
-/* heap handles */\r
-Pktlib_HeapHandle netcp_heap; /* internal default */\r
-Pktlib_HeapHandle netcp_control_heap; /* for control messages */\r
-Pktlib_HeapHandle createdHeaps[TUNE_NETAPI_MAX_HEAPS]; /* created by app and registered */\r
-\r
-/* pktios defined */\r
-int n_pktios; /* #of pktios that are active for this instance */\r
-void* pktios[NETAPI_MAX_PKTIO]; /* the list of pktios */\r
-\r
-/* scheduler stuff. unallocated if NETAPI_INCLUDE_SCHED not set */\r
-void * p_sched;\r
-\r
-/* nwal local context */\r
-NETAPI_NWAL_LOCAL_CONTEXT_T nwal_local;\r
-\r
-/* security stuff */\r
-\r
-/* timer stuff */\r
-\r
-/* thread cookie */\r
-void * cookie; /*set by calling thread */\r
-\r
-} NETAPI_HANDLE_T;\r
-\r
-\r
-//internal initialization routines */\r
-int netapi_init_qm(int max_descriptors);\r
-int netapi_init_cppi(void);\r
-int netapi_init_cpsw(void);\r
-int netapi_start_qm(void);\r
-int netapi_init_nwal(\r
- int region2use,\r
- Pktlib_HeapIfTable * p_table,\r
- NETAPI_NWAL_GLOBAL_CONTEXT_T * p_nwal_context,\r
- NETAPI_CFG_T *p_cfg );\r
-int netapi_start_nwal(Pktlib_HeapHandle pkt_heap,\r
- Pktlib_HeapHandle cmd_heap,\r
- NETAPI_NWAL_LOCAL_CONTEXT_T *p ,\r
- NETAPI_NWAL_GLOBAL_CONTEXT_T * p_nwal_glob_context );\r
-\r
-int netapi_init_timer(void);\r
-int netapi_qm_setup_mem_region(\r
- unsigned int numDesc,\r
- unsigned int descSize,\r
- unsigned int* pDescMemBase,\r
- int memRegion);\r
-//for above\r
-#define NETAPI_GLOBAL_REGION TUNE_NETAPI_QM_GLOBAL_REGION \r
-#define NETAPI_LOCAL_REGION ((NETAPI_GLOBAL_REGION)+1) \r
-\r
-int netapi_VM_memory_setup(void);\r
-void netapi_VM_memory_teardown(void);\r
-\r
-//nwal callbacks\r
-void netapi_NWALRxPktCallback (uint32_t appCookie,\r
- uint16_t numPkts,\r
- nwalRxPktInfo_t* pPktInfo,\r
- uint64_t timestamp,\r
- nwal_Bool_t* pFreePkt);\r
-\r
-void netapi_NWALCmdCallBack (nwal_AppId appHandle,\r
- uint16_t trans_id,\r
- nwal_RetValue ret);\r
-\r
-void netapi_NWALCmdPaStatsReply (nwal_AppId appHandle,\r
- nwal_TransID_t trans_id,\r
- paSysStats_t *stats);\r
-\r
-void netapi_NWALSBPktCallback (uint32_t appCookie,\r
- uint16_t numPkts,\r
- nwalDmRxPayloadInfo_t* pDmRxPktInfo,\r
- nwal_Bool_t* pFreePkt);\r
-\r
-\r
-//***********************************\r
-//internal utilities\r
-//*************************************\r
-\r
-//return the list of pktios for this instance\r
-static inline void ** netapi_get_pktio_list(NETAPI_T p)\r
-{\r
-NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;\r
-return &pp->pktios[0];\r
-}\r
-\r
-//get scheduler block handle\r
-static inline void * netapi_get_scheduler(NETAPI_T p)\r
-{\r
-NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;\r
-return pp->p_sched;\r
-}\r
-\r
-/* return pointer to global area */\r
-NETAPI_GLOBAL_T * netapi_get_global(void);\r
-\r
-//add a pktio name (and queue) to global list\r
-static inline int netapi_add_global_pktio(NETAPI_T p, char *name, Qmss_Queue * qn)\r
-{\r
-NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;\r
-PKTIO_ENTRY_T *pe;\r
-int i;\r
-//find a free slot\r
-pe = &((NETAPI_GLOBAL_T *)(pp->global))->pktios[0];\r
-\r
-for(i=0;i<NETAPI_MAX_PKTIO; i++,pe++)\r
- {\r
- if (pe->qn.qNum == -1)\r
- {\r
- pe->qn.qNum=qn->qNum;\r
- pe->qn.qMgr=qn->qMgr;\r
- strncpy(pe->name, name, PKTIO_MAX_NAME);\r
- return 1;\r
- }\r
- pe+=1;\r
- }\r
- return 0; //no room\r
-}\r
-\r
-//delete a pktio name (and queue) to global list\r
-static inline int netapi_del_global_pktio(NETAPI_T p, char *name)\r
-{\r
-NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;\r
-PKTIO_ENTRY_T *pe;\r
-int i;\r
-//find slot\r
-pe = &((NETAPI_GLOBAL_T *)(pp->global))->pktios[0];\r
-\r
-for(i=0;i<NETAPI_MAX_PKTIO; i++,pe++)\r
- {\r
- if (pe->qn.qNum == -1) continue;\r
- if (!strncmp(name, pe->name, PKTIO_MAX_NAME))\r
- {\r
- pe->qn.qNum=-1;\r
- pe->name[0]='\0';\r
- return 1;\r
- }\r
- pe+=1;\r
- }\r
- return 0; //no room\r
-}\r
-\r
-\r
-/* get list of global pktios that have been created */\r
-static inline Qmss_Queue* netapi_find_global_pktio(NETAPI_T p, char *name)\r
-{\r
-NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;\r
-PKTIO_ENTRY_T *pe;\r
-int i;\r
-//find slot\r
-pe = &((NETAPI_GLOBAL_T *)(pp->global))->pktios[0];\r
-\r
-for(i=0;i<NETAPI_MAX_PKTIO; i++,pe++)\r
- {\r
- if (pe->qn.qNum == -1) continue;\r
- if (!strncmp(name, pe->name, PKTIO_MAX_NAME))\r
- {\r
- return &pe->qn;\r
- }\r
- pe +=1;\r
- }\r
- return NULL; //not found\r
-}\r
-\r
-/* return the nwal global instance handle */\r
-static inline nwal_Handle netapi_return_nwal_instance_handle(NETAPI_T p)\r
-{\r
-\r
-NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;\r
-return ((NETAPI_GLOBAL_T *)(pp->global))->nwal_context.nwalInstHandle;\r
-}\r
-\r
-//utility to clear out a queue\r
-void netapi_zapQ(int queueNum);\r
-void netcp_cfgp_build_route(NETCP_CFG_ROUTE_T * p_route, int16_t * p_flow, Qmss_QueueHnd * p_q);\r
-\r
-//database utilities\r
-int netcp_cfgp_find_saslot( NETAPI_NWAL_GLOBAL_CONTEXT_T *p, int iface);\r
-void netcp_cfgp_delete_sa(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,int sa_slot);\r
-void netcp_cfgp_insert_sa(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
- int sa_slot, //we 'reserved it already'\r
- int dir,\r
- int mode,\r
- void * temp1,\r
- void * temp2,\r
- void * handle_inflow,\r
- void * handle_sideband);\r
-void *netcp_cfgp_get_sa_handles( NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
- int sa_slot, void ** p_sideband);\r
-void* netcp_cfgp_get_mac_handle(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,int iface_no);\r
-NetapiNwalTransInfo_t * netapip_GetFreeTransInfo(NETAPI_GLOBAL_T *p_global, nwal_TransID_t *pTransId);\r
-void *netcp_cfgp_get_policy( NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
- int policy_slot);\r
-#endif\r
+/*****************************************
+ * file: netapi_loc.h
+ * purpose: internal netapi stuff
+ ****************************************/
+
+#ifndef __NETAPI_LOC__H
+#define __NETAPI_LOC__H
+/***************************************
+* INTERNAL HANDLE STRUCTURE DEFINITION
+****************************************/
+
+/***********************************************
+ * GLOBAL AREA
+ * short term: this is global to process
+ * (multi-process not supported)
+ * long term: this structure gets put in shared memory
+ ***********************************************/
+
+/* list of global pktio channels that have been created
+ (NETCP_TX, RX are intrinsic so won't be here) */
+typedef struct PKTIO_ENTRY_tag
+{
+ char name[PKTIO_MAX_NAME+1];
+ Qmss_Queue qn; // -1 => slot is free
+} PKTIO_ENTRY_T;
+
+/* to hold an IP on an interface */
+typedef struct NETCP_INTERFACE_IP_Tag
+{
+ int in_use;
+ void * nwal_handle;
+ nwal_IpType ip_type;
+ nwalIpAddr_t ip_addr;
+ nwalIpOpt_t ip_qualifiers;
+} NETCP_INTERFACE_IP_T;
+
+/* to hold a classifier */
+typedef struct NETCP_INTERFACE_CLASSIFIER_Tag
+{
+ int in_use;
+ int class_type; //see netcp_cfg.h
+ void * nwal_L2_handle;
+ void * nwal_L3_handle;
+ void * nwal_L4_handle;
+} NETCP_INTERFACE_CLASSIFIER_T;
+
+/* to hold an ipsec rx policy */
+typedef struct NETCP_IPSEC_POLICY_Tag
+{
+ int in_use;
+ int tunnel; //associated tunnel
+ void * nwal_handle; //handle associated with this RX Policy
+} NETCP_IPSEC_POLICY_T;
+
+/* to hold a tunnel */
+typedef struct NETCP_IPSEC_SA_Tag
+{
+ int in_use;
+ int inbound; //true if inbound
+
+ int sa_mode; //mode we are going to use
+#define NETCP_IPSEC_SA_MODE_INFLOW 0
+#define NETCP_IPSEC_SA_MODE_SIDEBAND 1
+
+ void * sa_handle_inflow; //for inflow mode
+ void * sa_handle_sideband; //for sideband mode
+ int iface; //associated interface
+} NETCP_IPSEC_SA_T;
+
+/* to hold a netcp 'interface' */
+typedef struct NETCP_INTERFACE_Tag
+{
+ int in_use; /* 1 for valid */
+ int state; /* 0=down, 1=up, future.. */
+ void * nwal_handle; //handle associated with this interface
+ unsigned char mac[6]; // mac address
+ unsigned int vlan; //future
+} NETCP_INTERFACE_T;
+
+/*to keep track of netcp config transactions */
+typedef struct {
+ nwal_Bool_t inUse;
+ uint16_t transType;
+#define NETAPI_NWAL_HANDLE_TRANS_NONE 0
+#define NETAPI_NWAL_HANDLE_TRANS_MAC 1
+#define NETAPI_NWAL_HANDLE_TRANS_IP 2
+#define NETAPI_NWAL_HANDLE_TRANS_PORT 3
+#define NETAPI_NWAL_HANDLE_TRANS_SA 4
+#define NETAPI_NWAL_HANDLE_TRANS_SA_POLICY 5
+#define NETAPI_NWAL_HANDLE_STAT_REQUEST 6
+
+ uint16_t state;
+#define NETAPI_NWAL_HANDLE_STATE_IDLE 0
+#define NETAPI_NWAL_HANDLE_STATE_OPEN_PENDING 1
+#define NETAPI_NWAL_HANDLE_STATE_OPEN 2
+#define NETAPI_NWAL_HANDLE_STATE_CLOSE_PENDING 3
+ nwal_Handle handle;
+ uint64_t transId;
+ NETAPI_T netapi_handle; //the thread making the transaction
+} NetapiNwalTransInfo_t;
+
+/* to hold user defined flows */
+typedef struct NETCP_REGISTERED_FLOWS_Tag
+{
+ int in_use; //1=> in use
+ void * handle; //cppi handle to resource (internal)
+ NETCP_CFG_FLOW_T flow;
+} NETCP_REGISTERED_FLOWS_T;
+
+/******************************
+ * nwal global context
+ * (shared over all instances)
+ *******************************/
+typedef struct
+{
+ int state;
+#define NETAPI_NW_CXT_GLOB_INACTIVE 0x0
+#define NETAPI__CXT_GLOB_ACTIVE 0x1
+#define NETAPI_NW_CXT_GLOB_RES_ALLOC_COMPLETE 0x3
+
+ nwal_Handle nwalInstHandle;
+ //internal heaps used just by netcp (sa<->pa). SW doesn't touch these
+ Pktlib_HeapHandle sa2pa_heap;
+ Pktlib_HeapHandle pa2sa_heap;
+ //stats
+ paSysStats_t paStats;
+ int numCmdPass;
+ int numCmdFail;
+ int numBogusTransIds;
+ NetapiNwalTransInfo_t transInfos[TUNE_NETAPI_MAX_NUM_TRANS]; //transactions
+ NETCP_INTERFACE_T interfaces[TUNE_NETAPI_MAX_INTERFACES]; //interfaces
+ NETCP_INTERFACE_CLASSIFIER_T classi[TUNE_NETAPI_MAX_CLASSIFIERS]; //classifiers
+ NETCP_REGISTERED_FLOWS_T flows[TUNE_NETAPI_MAX_FLOWS]; //flows
+ NETCP_IPSEC_SA_T tunnel[TUNE_NETAPI_MAX_SA]; //tunnels
+ NETCP_IPSEC_POLICY_T policy[TUNE_NETAPI_MAX_POLICY]; //policies
+ NETCP_INTERFACE_IP_T ips[TUNE_NETAPI_MAX_IP]; //ips
+} NETAPI_NWAL_GLOBAL_CONTEXT_T;
+
+/* NWAL Local context (per core/thread) */
+typedef struct
+{
+ //void * nwalLocInstance;
+#define NETAPI_NW_CXT_LOC_INACTIVE 0x0
+#define NETAPI_NW_CXT_LOC_ACTIVE 0x2
+ int state;
+
+ int numPendingCfg;
+ NETCP_CFG_STATS_CB stats_cb;
+
+/* stats */
+ int numL2PktsRecvd;
+ int numL3PktsRecvd;
+ int numL4PktsRecvd;
+ int numL4PktsSent;
+ int TxErrDrop;
+
+ /* local config */
+ nwalLocCfg_t nwalLocCfg;
+} NETAPI_NWAL_LOCAL_CONTEXT_T;
+
+/* the global */
+typedef struct NETAPI_GLOBAL_tag
+{
+#define NETAPI_MAX_PKTIO (TUNE_NETAPI_MAX_PKTIO)
+PKTIO_ENTRY_T pktios[NETAPI_MAX_PKTIO];
+
+/* configuration */
+NETAPI_CFG_T cfg;
+
+/* global timers */
+
+/* nwal context */
+NETAPI_NWAL_GLOBAL_CONTEXT_T nwal_context;
+
+} NETAPI_GLOBAL_T;
+
+
+/************************************
+ * this is a per thread structure.
+ * It contains stuff local to thread
+ * and pointer to global stuff
+ * that is shared over all threads,
+ **************************************/
+typedef struct NETAPI_HANDLE_Tag
+{
+int master; //master type
+
+void * global; /* pointer to the global area */
+
+/* heap handles */
+Pktlib_HeapHandle netcp_heap; /* internal default */
+Pktlib_HeapHandle netcp_control_heap; /* for control messages */
+Pktlib_HeapHandle createdHeaps[TUNE_NETAPI_MAX_HEAPS]; /* created by app and registered */
+
+/* pktios defined */
+int n_pktios; /* #of pktios that are active for this instance */
+void* pktios[NETAPI_MAX_PKTIO]; /* the list of pktios */
+
+/* scheduler stuff. unallocated if NETAPI_INCLUDE_SCHED not set */
+void * p_sched;
+
+/* nwal local context */
+NETAPI_NWAL_LOCAL_CONTEXT_T nwal_local;
+
+/* security stuff */
+
+/* timer stuff */
+
+/* thread cookie */
+void * cookie; /*set by calling thread */
+
+} NETAPI_HANDLE_T;
+
+
+/* internal initialization routines */
+int netapi_init_qm(int max_descriptors);
+int netapi_init_cppi(void);
+int netapi_init_cpsw(void);
+int netapi_start_qm(void);
+int netapi_init_nwal(
+ int region2use,
+ Pktlib_HeapIfTable * p_table,
+ NETAPI_NWAL_GLOBAL_CONTEXT_T * p_nwal_context,
+ NETAPI_CFG_T *p_cfg );
+int netapi_start_nwal(Pktlib_HeapHandle pkt_heap,
+ Pktlib_HeapHandle cmd_heap,
+ NETAPI_NWAL_LOCAL_CONTEXT_T *p ,
+ NETAPI_CFG_T *p_cfg,
+ NETAPI_NWAL_GLOBAL_CONTEXT_T * p_nwal_glob_context );
+
+int netapi_init_timer(void);
+int netapi_qm_setup_mem_region(
+                     uint32_t              numDesc,
+                     uint32_t              descSize,
+                     uint32_t*             pDescMemBase,
+                     Qmss_MemRegion        memRegion);
+//for above
+#define NETAPI_GLOBAL_REGION TUNE_NETAPI_QM_GLOBAL_REGION
+#define NETAPI_LOCAL_REGION ((NETAPI_GLOBAL_REGION)+1)
+
+int netapi_VM_memory_setup(void);
+void netapi_VM_memory_teardown(void);
+
+//nwal callbacks
+void netapi_NWALRxPktCallback (uint32_t appCookie,
+ uint16_t numPkts,
+ nwalRxPktInfo_t* pPktInfo,
+ uint64_t timestamp,
+ nwal_Bool_t* pFreePkt);
+
+void netapi_NWALCmdCallBack (nwal_AppId appHandle,
+ uint16_t trans_id,
+ nwal_RetValue ret);
+
+void netapi_NWALCmdPaStatsReply (nwal_AppId appHandle,
+ nwal_TransID_t trans_id,
+ paSysStats_t *stats);
+
+void netapi_NWALSBPktCallback (uint32_t appCookie,
+ uint16_t numPkts,
+ nwalDmRxPayloadInfo_t* pDmRxPktInfo,
+ nwal_Bool_t* pFreePkt);
+
+
+//***********************************
+//internal utilities
+//*************************************
+
+//return the list of pktios for this instance
+static inline void ** netapi_get_pktio_list(NETAPI_T p)
+{
+NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;
+return &pp->pktios[0];
+}
+
+//get scheduler block handle
+static inline void * netapi_get_scheduler(NETAPI_T p)
+{
+NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;
+return pp->p_sched;
+}
+
+/* return pointer to global area */
+NETAPI_GLOBAL_T * netapi_get_global(void);
+
+//add a pktio name (and queue) to global list
+static inline int netapi_add_global_pktio(NETAPI_T p, char *name, Qmss_Queue * qn)
+{
+NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;
+PKTIO_ENTRY_T *pe;
+int i;
+//find a free slot
+pe = &((NETAPI_GLOBAL_T *)(pp->global))->pktios[0];
+
+for(i=0;i<NETAPI_MAX_PKTIO; i++,pe++)
+ {
+ if (pe->qn.qNum == -1)
+ {
+ pe->qn.qNum=qn->qNum;
+ pe->qn.qMgr=qn->qMgr;
+ strncpy(pe->name, name, PKTIO_MAX_NAME);
+ return 1;
+ }
+ }
+ return 0; //no room
+}
+
+//delete a pktio name (and queue) from the global list
+static inline int netapi_del_global_pktio(NETAPI_T p, char *name)
+{
+NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;
+PKTIO_ENTRY_T *pe;
+int i;
+//find slot
+pe = &((NETAPI_GLOBAL_T *)(pp->global))->pktios[0];
+
+for(i=0;i<NETAPI_MAX_PKTIO; i++,pe++)
+ {
+ if (pe->qn.qNum == -1) continue;
+ if (!strncmp(name, pe->name, PKTIO_MAX_NAME))
+ {
+ pe->qn.qNum=-1;
+ pe->name[0]='\0';
+ return 1;
+ }
+ }
+    return 0; //not found
+}
+
+
+/* get list of global pktios that have been created */
+static inline Qmss_Queue* netapi_find_global_pktio(NETAPI_T p, char *name)
+{
+NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;
+PKTIO_ENTRY_T *pe;
+int i;
+//find slot
+pe = &((NETAPI_GLOBAL_T *)(pp->global))->pktios[0];
+
+for(i=0;i<NETAPI_MAX_PKTIO; i++,pe++)
+ {
+ if (pe->qn.qNum == -1) continue;
+ if (!strncmp(name, pe->name, PKTIO_MAX_NAME))
+ {
+ return &pe->qn;
+ }
+ }
+ return NULL; //not found
+}
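A small usage sketch for the registry helpers above; h (a NETAPI_T instance handle) and the queue value are placeholders:

/* Illustrative only: register a named pktio queue, look it up later,
 * and remove it when the channel is closed. */
Qmss_Queue q;                          /* filled in by the pktio create path */
if (!netapi_add_global_pktio(h, "my_chan", &q))
    printf("pktio table full\n");

Qmss_Queue *found = netapi_find_global_pktio(h, "my_chan");
if (found)
    printf("queue %d on manager %d\n", found->qNum, found->qMgr);

netapi_del_global_pktio(h, "my_chan");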
+
+/* return the nwal global instance handle */
+static inline nwal_Handle netapi_return_nwal_instance_handle(NETAPI_T p)
+{
+
+NETAPI_HANDLE_T *pp = (NETAPI_HANDLE_T *) p;
+return ((NETAPI_GLOBAL_T *)(pp->global))->nwal_context.nwalInstHandle;
+}
+
+//utility to clear out a queue
+void netapi_zapQ(int queueNum);
+void netcp_cfgp_build_route(NETCP_CFG_ROUTE_T * p_route, int16_t * p_flow, Qmss_QueueHnd * p_q);
+
+//database utilities
+int netcp_cfgp_find_saslot( NETAPI_NWAL_GLOBAL_CONTEXT_T *p, int iface);
+void netcp_cfgp_delete_sa(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,int sa_slot);
+void netcp_cfgp_insert_sa(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int sa_slot, //we 'reserved it already'
+ int dir,
+ int mode,
+ void * temp1,
+ void * temp2,
+ void * handle_inflow,
+ void * handle_sideband);
+void *netcp_cfgp_get_sa_handles( NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int sa_slot, void ** p_sideband);
+void* netcp_cfgp_get_mac_handle(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,int iface_no);
+NetapiNwalTransInfo_t * netapip_GetFreeTransInfo(NETAPI_GLOBAL_T *p_global, nwal_TransID_t *pTransId);
+void *netcp_cfgp_get_policy( NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int policy_slot);
+#endif
index a7e1756d441f22bc8663e65ba68b9df60a3d9767..d61ddc6acb06f6a8778842dd778f2ce43a7ce16d 100755 (executable)
-\r
-/******************************************************************************\r
- * FILE netapi_vm.c\r
- * PURPOSE: Memory allocator for NETAPI and related utilities\r
- * -- using MSMC for descriptors/buffers (current), use CMA (future) \r
- ******************************************************************************\r
- * FILE NAME: netapi_vm.c\r
- *\r
- * DESCRIPTION: Memory allocator for netapi \r
- * This is only a permanent memory allocator.\r
- *\r
- * REVISION HISTORY:\r
- *\r
- * Copyright (c) Texas Instruments Incorporated 2010-2011\r
- * \r
- * Redistribution and use in source and binary forms, with or without \r
- * modification, are permitted provided that the following conditions \r
- * are met:\r
- *\r
- * Redistributions of source code must retain the above copyright \r
- * notice, this list of conditions and the following disclaimer.\r
- *\r
- * Redistributions in binary form must reproduce the above copyright\r
- * notice, this list of conditions and the following disclaimer in the \r
- * documentation and/or other materials provided with the \r
- * distribution.\r
- *\r
- * Neither the name of Texas Instruments Incorporated nor the names of\r
- * its contributors may be used to endorse or promote products derived\r
- * from this software without specific prior written permission.\r
- *\r
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \r
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT \r
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT \r
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, \r
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT \r
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \r
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \r
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
- *\r
- */\r
-\r
-#include <stdint.h>\r
-#include <stdio.h>\r
-#include <string.h>\r
-#include <sys/types.h>\r
-#include <sys/stat.h>\r
-#include <fcntl.h>\r
-#include <sys/mman.h>\r
-#include <errno.h>\r
-#include <unistd.h>\r
-\r
-#include <ti/drv/nwal/nwal.h> \r
-#include "netapi_vm.h"\r
-#include <sys/ioctl.h>\r
-#include "tools/module/netapimod.h"\r
-\r
-\r
-/***********************RAW MEMORY ALLOCATION & TRANSLATION*************************/\r
-/* Macro to align x to y */\r
-#define align(x,y) ((x + y) & (~y))\r
-\r
-uint8_t *netapi_VM_mem_start_phy = (uint8_t*)0;\r
-uint8_t *netapi_VM_mem_start = (uint8_t*)0;\r
-uint8_t *netapi_VM_mem_end = (uint8_t*)0;\r
-uint8_t *netapi_VM_mem_end_phy = (uint8_t*)0;\r
-static uint8_t *netapi_VM_mem_alloc_ptr = (uint8_t*)0;\r
-static uint32_t netapi_VM_mem_size = 0;\r
-\r
-\r
-\r
-/* File descriptor for /dev/mem */ \r
-static int dev_mem_fd;\r
-static int our_netapi_module_fd;\r
-#define USE_MODULE_MMAP //we will mmap through netapi kernel module, not /dev/mem\r
-#ifndef USE_MODULE_MMAP\r
-static int temp_fd;\r
-#endif\r
-\r
-nwal_Bool_t netapi_VM_memAllocInit\r
-(\r
- uint8_t *addr, /* Physical address */\r
- uint32_t size /* Size of block */\r
-)\r
-{\r
- void *map_base; \r
-\r
- //always open dev/mem, since we need for QM, CPPI, etc\r
- if((dev_mem_fd = open("/dev/mem", (O_RDWR | O_SYNC))) == -1)\r
- {\r
- printf(">netapi_VM_memAllocInit: Failed to open \"dev/mem\" err=%s\n",\r
- strerror(errno));\r
- return nwal_FALSE;\r
- }\r
-\r
-#ifdef NETAPI_USE_MSMC\r
- // memory map in addr to addr+size (msmc)\r
- map_base = netapi_VM_memMap ((void *)addr, size); \r
-\r
- if (!map_base)\r
- {\r
- printf(">netapi_VM_memAllocInit: Failed to mmap addr (0x%x)", addr);\r
- return nwal_FALSE;\r
- }\r
-\r
- printf(">netapi_VM_memAllocInit (uncached msmc) Phy Addr %x Memory (%d bytes) mapped at address %p.\n", addr,size, map_base); \r
-#else \r
- //use cached DDR. This requires NETAPI kernel module\r
- our_netapi_module_fd=netapi_utilModInit();\r
-\r
- if (our_netapi_module_fd == -1) {\r
- printf(">netapi_VM_memAllocInit: failed to open /dev/netapi: '%s'\n", strerror(errno));\r
- return nwal_FALSE;\r
- }\r
- addr= ( uint8_t *) netapi_utilGetPhysOfBufferArea(); //get address that was allocated for us by kernela module */\r
- size = netapi_utilGetSizeOfBufferArea(); //get the size that was allocated\r
-#ifdef USE_MODULE_MMAP\r
- map_base = (void *) netapi_utilGetVaOfBufferArea(0,size); //mmap into our space, return va\r
-#else\r
- if( (temp_fd = open("/dev/mem", O_RDWR )) == -1) {\r
- printf(">netapi_VM_memAllocInit: failed to open dev/mem again cached err=%d\n",errno);\r
- return nwal_FALSE; \r
- }\r
-\r
- map_base = mmap(0,size , PROT_READ | PROT_WRITE, MAP_SHARED, temp_fd, addr);\r
- if(map_base == (void *) -1) {\r
- printf(">netapi_VM_memAllocInit: failed to mmap CMA area at phy %x err=%d\n",\r
- addr, errno); \r
- return nwal_FALSE;\r
- }\r
-#endif\r
- printf(">netapi_VM_memAllocInit: (cached ddr) Phy Addr %x Memory (%d bytes) mapped at address %p.\n", addr,size, map_base); \r
-#endif\r
-\r
- netapi_VM_mem_alloc_ptr = netapi_VM_mem_start = map_base;\r
- netapi_VM_mem_size = size;\r
- netapi_VM_mem_end = netapi_VM_mem_start + netapi_VM_mem_size;\r
- netapi_VM_mem_start_phy = addr;\r
- netapi_VM_mem_end_phy = netapi_VM_mem_start_phy + netapi_VM_mem_size;\r
- return nwal_TRUE;\r
-}\r
-\r
-void* netapi_VM_memAlloc\r
-(\r
- uint32_t size,\r
- uint32_t align\r
-)\r
-{\r
- uint32_t key;\r
- uint8_t *alloc_ptr;\r
- void *p_block =NULL;\r
-\r
- Osal_stubCsEnter();\r
- alloc_ptr = (uint8_t*)align((uint32_t)netapi_VM_mem_alloc_ptr, align);\r
- if ((alloc_ptr + size) < netapi_VM_mem_end)\r
- {\r
- p_block =(void *)alloc_ptr;\r
- netapi_VM_mem_alloc_ptr = alloc_ptr + size;\r
- Osal_stubCsExit(key);\r
- memset (p_block, 0, size);\r
- }\r
- else \r
- {\r
- Osal_stubCsExit(key);\r
- }\r
- return p_block;\r
-}\r
-uint32_t xtraLogs=0;\r
-/* Api to map the give physical address to virtual memory space */\r
-void *netapi_VM_memMap\r
-(\r
- void *addr, /* Physical address */\r
- uint32_t size /* Size of block */\r
-)\r
-{\r
- void *map_base,*virt_addr,*tmpAddr;\r
- uint32_t page_sz;\r
- long retval;\r
- uint32_t mask = (size-1);\r
- uint32_t offset;\r
-\r
- retval = sysconf(_SC_PAGE_SIZE);\r
- if (retval == -1)\r
- {\r
- printf(">netapi_VM_memMap: Failed to get page size err=%s\n",\r
- strerror(errno));\r
- return (void *)0;\r
- }\r
-\r
- page_sz = (uint32_t)retval;\r
-\r
- if (size%page_sz)\r
- {\r
- printf(">netapi_VM_memMap: error: block size not aligned to page size\n");\r
- return (void *)0;\r
- }\r
-\r
- if ((uint32_t)addr%page_sz)\r
- {\r
- printf(">netapi_VM_memMap: error: addr not aligned to page size\n");\r
- return (void *)0;\r
- }\r
-\r
- map_base = mmap(0, size, (PROT_READ|PROT_WRITE), MAP_SHARED, dev_mem_fd, (off_t)addr & ~mask);\r
- if(map_base == (void *) -1) \r
- {\r
- printf(">netapi_VM_memMap: Failed to mmap \"dev/mem\" err=%s\n",\r
- strerror(errno));\r
- return (void *)0;\r
- }\r
- virt_addr = map_base + ((off_t)addr & mask);\r
- if(xtraLogs)\r
- {\r
- printf(">netapi_VM_memMap:Memory mapped Begin Address 0x%x Read Value: 0x%x.\n", virt_addr,*((unsigned long *)virt_addr));\r
- // offset = size/(sizeof(unsigned long));\r
- // tmpAddr = (unsigned long *)virt_addr + offset-1;\r
- tmpAddr = (uint8_t *)virt_addr + 0x6800c;\r
- printf("netapi_VM_memMap:Memory mapped End Address 0x%x Read Value: 0x%x.\n", (unsigned long *)tmpAddr ,*((unsigned long *)tmpAddr));\r
- *((unsigned long *)tmpAddr) = 0x1234;\r
- printf("netapi_VM_memMap:Memory mapped End Address 0x%x Write Value: 0x%x.\n", (unsigned long *)tmpAddr ,*((unsigned long *)tmpAddr));\r
- \r
- }\r
- return(virt_addr);\r
-}\r
-\r
-/***************************************************************/\r
-/*************** Memory Initilaization**************************/\r
-/***************************************************************/\r
-/* for now use msmc */\r
-/* Total Permanent memory required in NWAL test\r
- * for Packet buffers & descriptor buffers\r
- */\r
-#define NETAPI_PERM_MEM_SZ (TUNE_NETAPI_PERM_MEM_SZ) \r
-\r
-/* Physical address map & size for various subsystems */\r
-#define QMSS_CFG_BASE_ADDR CSL_QM_SS_CFG_QUE_PEEK_REGS\r
-#define QMSS_CFG_BLK_SZ (1*1024*1024)\r
-#define QMSS_DATA_BASE_ADDR 0x44020000 \r
-#define QMSS_DATA_BLK_SZ (0x60000)\r
-#define SRIO_CFG_BASE_ADDR CSL_SRIO_CONFIG_REGS\r
-#define SRIO_CFG_BLK_SZ (132*1024)\r
-#define PASS_CFG_BASE_ADDR CSL_PA_SS_CFG_REGS \r
-#define PASS_CFG_BLK_SZ (1*1024*1024)\r
-\r
-#define MSMC_SRAM_BASE_ADDR CSL_MSMC_SRAM_REGS\r
-\r
-/* Global variables to hold virtual address of various subsystems */\r
-void *netapi_VM_qmssCfgVaddr;\r
-void *netapi_VM_qmssDataVaddr;\r
-void *netapi_VM_srioCfgVaddr;\r
-void *netapi_VM_passCfgVaddr;\r
-\r
-/* also for our descriptor area */\r
-unsigned char *netapi_VM_QMemLocalDescRam=NULL;\r
-unsigned char *netapi_VM_QMemGlobalDescRam=NULL;\r
-\r
-/* finaly SA context area */\r
-unsigned char *netapi_VM_SaContextVaddr=NULL;\r
-\r
-/************************************************\r
- * teardown VM memory\r
- ***********************************************/\r
-void netapi_VM_memory_teardown(void)\r
-{\r
- netapi_utilModClose();\r
- close(dev_mem_fd);\r
-#ifndef USE_MODULE_MMAP\r
- close(temp_fd);\r
-#endif\r
-}\r
-/*************************************************\r
- * setup VM memory\r
- ************************************************/\r
-int netapi_VM_memory_setup(void)\r
-{\r
-/* (1) big chunck of memory out of MSMC or DDR via kernel CMA */\r
-#ifdef NETAPI_USE_DDR\r
- if (netapi_VM_memAllocInit( NULL, 0) == nwal_FALSE) {\r
- printf(">netapi ERROR: netapi_V_MmemAllocInit from DDR/CMA failed\n");\r
- return (-1);\r
- }\r
-#else //uncached MSMC \r
- if (netapi_VM_memAllocInit((uint8_t*)MSMC_SRAM_BASE_ADDR,\r
- NETAPI_PERM_MEM_SZ) == nwal_FALSE) {\r
- printf(">netapi ERROR: netapi_V_MmemAllocInit from MSMC failed\n");\r
- return (-1);\r
- }\r
-#endif\r
-\r
- /* (2) Create virtual memory maps for peripherals */\r
- /* (2a) QMSS CFG Regs */\r
- netapi_VM_qmssCfgVaddr = netapi_VM_memMap((void*)QMSS_CFG_BASE_ADDR,\r
- QMSS_CFG_BLK_SZ);\r
- if (!netapi_VM_qmssCfgVaddr)\r
- {\r
- printf(">netapi ERROR: Failed to map QMSS CFG registers\n");\r
- return (-1);\r
- }\r
- printf(">netapi QMSS_CFG_BASE_ADDR:0x%x Memory mapped at address %p.\n",(void*)QMSS_CFG_BASE_ADDR, netapi_VM_qmssCfgVaddr);\r
-\r
- /* (2b) QMSS DATA Regs */\r
- netapi_VM_qmssDataVaddr = netapi_VM_memMap((void*)QMSS_DATA_BASE_ADDR,\r
- QMSS_DATA_BLK_SZ);\r
- if (!netapi_VM_qmssDataVaddr)\r
- {\r
- printf(">netapi ERROR: Failed to map QMSS DATA registers\n");\r
- return (-1);\r
- }\r
- printf(">netapi QMSS_DATA_BASE_ADDR:0x%x Memory mapped at address %p.\n",(void*)QMSS_DATA_BASE_ADDR, netapi_VM_qmssDataVaddr);\r
-\r
- /* (2c) SRIO CFG Regs */\r
- netapi_VM_srioCfgVaddr = netapi_VM_memMap((void*)SRIO_CFG_BASE_ADDR,\r
- SRIO_CFG_BLK_SZ);\r
- if (!netapi_VM_srioCfgVaddr)\r
- {\r
- printf(">netapi ERROR: Failed to map SRIO CFG registers\n");\r
- return (-1);\r
- }\r
- printf(">netapi SRIO_CFG_BASE_ADDR:0x%x Memory mapped at address %p.\n",(void*)SRIO_CFG_BASE_ADDR, netapi_VM_srioCfgVaddr);\r
-\r
- /* (2d) PASS CFG Regs */\r
- netapi_VM_passCfgVaddr = netapi_VM_memMap((void*)PASS_CFG_BASE_ADDR,\r
- PASS_CFG_BLK_SZ);\r
- if (!netapi_VM_passCfgVaddr)\r
- {\r
- printf(">netapi ERROR: Failed to map PASS CFG registers\n");\r
- return (-1);\r
- }\r
- printf(">netapi PASS_CFG_BASE_ADDR:0x%x Memory mapped at address %p.\n",(void*)PASS_CFG_BASE_ADDR, netapi_VM_passCfgVaddr);\r
-\r
- /* (2e) SA COntext area */\r
-#ifdef NETAPI_ENABLE_SECURITY\r
-#define SEC_CONTEXT_SZ 384 //not tunable\r
-/* allocate 2x number of tunnels since we need one for inflow and one for data mode */\r
- netapi_VM_SaContextVaddr = netapi_VM_memAlloc((TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS*2 *\r
- SEC_CONTEXT_SZ),\r
- 128);\r
- if (!netapi_VM_SaContextVaddr)\r
- {\r
- printf(">netapi ERROR: Failed to map SA context memory region\n");\r
- return (-1);\r
- }\r
- printf(">netapi VM_SaContext: Memory mapped/allocated at address %p.\n", netapi_VM_SaContextVaddr);\r
-\r
-#else\r
- netapi_VM_SaContextVaddr= (char *) NULL;\r
-#endif\r
-\r
- /* (2f) Timer */\r
- t64_memmap(dev_mem_fd);\r
-\r
- /* (3) Allocate 2 QM regions from continguous chunk above */\r
- netapi_VM_QMemGlobalDescRam = (void *)netapi_VM_memAlloc((TUNE_NETAPI_NUM_GLOBAL_DESC *\r
- TUNE_NETAPI_DESC_SIZE),\r
- 128);\r
- netapi_VM_QMemLocalDescRam = (void *)netapi_VM_memAlloc((TUNE_NETAPI_NUM_LOCAL_DESC *\r
- TUNE_NETAPI_DESC_SIZE),\r
- 128);\r
- printf(">netapi local desc region=%x global desc region=%x\n", netapi_VM_QMemLocalDescRam,netapi_VM_QMemGlobalDescRam);\r
-\r
- return 1;\r
-\r
-}\r
-\r
+
+/******************************************************************************
+ * FILE netapi_vm.c
+ * PURPOSE: Memory allocator for NETAPI and related utilities
+ * -- using MSMC for descriptors/buffers (current), use CMA (future)
+ ******************************************************************************
+ * FILE NAME: netapi_vm.c
+ *
+ * DESCRIPTION: Memory allocator for netapi
+ * This is only a permanent memory allocator.
+ *
+ * REVISION HISTORY:
+ *
+ * Copyright (c) Texas Instruments Incorporated 2010-2011
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <ti/drv/nwal/nwal.h>
+#include "netapi_vm.h"
+#include <sys/ioctl.h>
+#include "tools/module/netapimod.h"
+
+
+/***********************RAW MEMORY ALLOCATION & TRANSLATION*************************/
+/* Macro to round x up to the next multiple of y (y must be a power of two) */
+#define align(x,y) (((x) + ((y) - 1)) & ~((y) - 1))
+
+#define NETAPI_PERM_MEM_SZ (TUNE_NETAPI_PERM_MEM_SZ)
+
+/* Physical address map & size for various subsystems */
+#define QMSS_CFG_BASE_ADDR CSL_QM_SS_CFG_QUE_PEEK_REGS
+#define QMSS_CFG_BLK_SZ (1*1024*1024)
+#define QMSS_DATA_BASE_ADDR 0x44020000
+#define QMSS_DATA_BLK_SZ (0x60000)
+#define SRIO_CFG_BASE_ADDR CSL_SRIO_CONFIG_REGS
+#define SRIO_CFG_BLK_SZ (132*1024)
+#define PASS_CFG_BASE_ADDR CSL_PA_SS_CFG_REGS
+#define PASS_CFG_BLK_SZ (1*1024*1024)
+
+#define MSMC_SRAM_BASE_ADDR CSL_MSMC_SRAM_REGS
+uint8_t *netapi_VM_mem_start_phy = (uint8_t*)0;
+uint8_t *netapi_VM_mem_start = (uint8_t*)0;
+uint8_t *netapi_VM_mem_end = (uint8_t*)0;
+uint8_t *netapi_VM_mem_end_phy = (uint8_t*)0;
+static uint8_t *netapi_VM_mem_alloc_ptr = (uint8_t*)0;
+static uint32_t netapi_VM_mem_size = 0;
+
+
+/* File descriptor for /dev/mem */
+static int dev_mem_fd;
+static int our_netapi_module_fd;
+#define USE_MODULE_MMAP //we will mmap through netapi kernel module, not /dev/mem
+#ifndef USE_MODULE_MMAP
+static int temp_fd;
+#endif
+
+nwal_Bool_t netapi_VM_memAllocInit
+(
+ uint8_t *addr, /* Physical address */
+ uint32_t size /* Size of block */
+)
+{
+ void *map_base;
+
+ //always open /dev/mem, since we need it for QM, CPPI, etc.
+ if((dev_mem_fd = open("/dev/mem", (O_RDWR | O_SYNC))) == -1)
+ {
+ printf(">netapi_VM_memAllocInit: Failed to open \"dev/mem\" err=%s\n",
+ strerror(errno));
+ return nwal_FALSE;
+ }
+
+#ifdef NETAPI_USE_MSMC
+ // memory map in addr to addr+size (msmc)
+ map_base = netapi_VM_memMap ((void *)addr, size);
+
+ if (!map_base)
+ {
+ printf(">netapi_VM_memAllocInit: Failed to mmap addr (0x%x)", addr);
+ return nwal_FALSE;
+ }
+
+ printf(">netapi_VM_memAllocInit (uncached msmc) Phy Addr %x Memory (%d bytes) mapped at address %p.\n", addr,size, map_base);
+#else
+ //use cached DDR. This requires NETAPI kernel module
+ our_netapi_module_fd=netapi_utilModInit();
+
+ if (our_netapi_module_fd == -1) {
+ printf(">netapi_VM_memAllocInit: failed to open /dev/netapi: '%s'\n", strerror(errno));
+ return nwal_FALSE;
+ }
+ addr= ( uint8_t *) netapi_utilGetPhysOfBufferArea(); //get the physical address the kernel module allocated for us
+ size = netapi_utilGetSizeOfBufferArea(); //get the size that was allocated
+#ifdef USE_MODULE_MMAP
+ map_base = (void *) netapi_utilGetVaOfBufferArea(NETAPIMOD_MMAP_DMA_MEM_OFFSET, size); //mmap into our space, return va
+#else
+ if( (temp_fd = open("/dev/mem", O_RDWR )) == -1) {
+ printf(">netapi_VM_memAllocInit: failed to open dev/mem again cached err=%d\n",errno);
+ return nwal_FALSE;
+ }
+
+ map_base = mmap(0,size , PROT_READ | PROT_WRITE, MAP_SHARED, temp_fd, addr);
+ if(map_base == (void *) -1) {
+ printf(">netapi_VM_memAllocInit: failed to mmap CMA area at phy %x err=%d\n",
+ addr, errno);
+ return nwal_FALSE;
+ }
+#endif
+ printf(">netapi_VM_memAllocInit: (cached ddr) Phy Addr %x Memory (%d bytes) mapped at address %p.\n", addr,size, map_base);
+#endif
+
+ netapi_VM_mem_alloc_ptr = netapi_VM_mem_start = map_base;
+ netapi_VM_mem_size = size;
+ netapi_VM_mem_end = netapi_VM_mem_start + netapi_VM_mem_size;
+ netapi_VM_mem_start_phy = addr;
+ netapi_VM_mem_end_phy = netapi_VM_mem_start_phy + netapi_VM_mem_size;
+ return nwal_TRUE;
+}
+
+void* netapi_VM_memAlloc
+(
+ uint32_t size,
+ uint32_t align
+)
+{
+ uint32_t key;
+ uint8_t *alloc_ptr;
+ void *p_block =NULL;
+
+ Osal_stubCsEnter();
+ alloc_ptr = (uint8_t*)align((uint32_t)netapi_VM_mem_alloc_ptr, align);
+ if ((alloc_ptr + size) < netapi_VM_mem_end)
+ {
+ p_block =(void *)alloc_ptr;
+ netapi_VM_mem_alloc_ptr = alloc_ptr + size;
+ Osal_stubCsExit(key);
+ memset (p_block, 0, size);
+ }
+ else
+ {
+ Osal_stubCsExit(key);
+ }
+ return p_block;
+}
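netapi_VM_memAlloc above is a permanent bump allocator over the single region mapped by netapi_VM_memAllocInit: round the current pointer up to the requested alignment, check against the end of the pool, advance, and return the block zeroed. A minimal standalone sketch of the same pattern, assuming a static pool in place of the mmap'ed region and omitting the Osal critical-section calls:

    #include <stdint.h>
    #include <string.h>

    /* round x up to the next multiple of a (a must be a power of two) */
    #define ROUND_UP(x, a)  (((x) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1))

    static uint8_t  example_pool[64 * 1024];            /* stand-in for the mapped region */
    static uint8_t *example_ptr = example_pool;         /* bump pointer (next free byte)  */
    static uint8_t *example_end = example_pool + sizeof(example_pool);

    static void *example_bump_alloc(uint32_t size, uint32_t align_to)
    {
        uint8_t *p = (uint8_t *)ROUND_UP((uintptr_t)example_ptr, align_to);
        if (p + size > example_end)                     /* permanent allocator: no free() */
            return NULL;
        example_ptr = p + size;
        memset(p, 0, size);                             /* blocks are returned zeroed     */
        return p;
    }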
+uint32_t xtraLogs=0;
+/* API to map the given physical address into user virtual memory space */
+void *netapi_VM_memMap
+(
+ void *addr, /* Physical address */
+ uint32_t size /* Size of block */
+)
+{
+ void *map_base,*virt_addr,*tmpAddr;
+ uint32_t page_sz;
+ long retval;
+ uint32_t mask = (size-1);
+ uint32_t offset;
+
+ retval = sysconf(_SC_PAGE_SIZE);
+ if (retval == -1)
+ {
+ printf(">netapi_VM_memMap: Failed to get page size err=%s\n",
+ strerror(errno));
+ return (void *)0;
+ }
+
+ page_sz = (uint32_t)retval;
+
+ if (size%page_sz)
+ {
+ printf(">netapi_VM_memMap: error: block size not aligned to page size\n");
+ return (void *)0;
+ }
+
+ if ((uint32_t)addr%page_sz)
+ {
+ printf(">netapi_VM_memMap: error: addr not aligned to page size\n");
+ return (void *)0;
+ }
+
+ map_base = mmap(0, size, (PROT_READ|PROT_WRITE), MAP_SHARED, dev_mem_fd, (off_t)addr & ~mask);
+ if(map_base == (void *) -1)
+ {
+ printf(">netapi_VM_memMap: Failed to mmap \"dev/mem\" err=%s\n",
+ strerror(errno));
+ return (void *)0;
+ }
+ virt_addr = map_base + ((off_t)addr & mask);
+ if(xtraLogs)
+ {
+ printf(">netapi_VM_memMap:Memory mapped Begin Address 0x%x Read Value: 0x%x.\n", virt_addr,*((unsigned long *)virt_addr));
+ // offset = size/(sizeof(unsigned long));
+ // tmpAddr = (unsigned long *)virt_addr + offset-1;
+ tmpAddr = (uint8_t *)virt_addr + 0x6800c;
+ printf("netapi_VM_memMap:Memory mapped End Address 0x%x Read Value: 0x%x.\n", (unsigned long *)tmpAddr ,*((unsigned long *)tmpAddr));
+ *((unsigned long *)tmpAddr) = 0x1234;
+ printf("netapi_VM_memMap:Memory mapped End Address 0x%x Write Value: 0x%x.\n", (unsigned long *)tmpAddr ,*((unsigned long *)tmpAddr));
+
+ }
+ return(virt_addr);
+}
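netapi_VM_memMap splits the physical address into a block-aligned base that is handed to mmap() and an in-block offset that is added back to the returned mapping; the size-1 mask arithmetic implicitly assumes a power-of-two block size. A small sketch of that split, using purely hypothetical values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t phys = 0x40001010;        /* hypothetical physical address          */
        uint32_t size = 0x100000;          /* hypothetical block size (power of two) */
        uint32_t mask = size - 1;

        uint32_t base   = phys & ~mask;    /* what gets handed to mmap()             */
        uint32_t offset = phys &  mask;    /* added back to the returned mapping     */

        printf("map base 0x%x, offset into mapping 0x%x\n", base, offset);
        return 0;
    }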
+
+/***************************************************************/
+/*************** Memory Initialization *************************/
+/***************************************************************/
+/* For now the pool comes from MSMC. The total permanent memory required
+ * for packet buffers & descriptor buffers is NETAPI_PERM_MEM_SZ (defined above).
+ */
+/* Global variables to hold virtual address of various subsystems */
+void *netapi_VM_qmssCfgVaddr;
+void *netapi_VM_qmssDataVaddr;
+void *netapi_VM_srioCfgVaddr;
+void *netapi_VM_passCfgVaddr;
+
+/* also for our descriptor area */
+unsigned char *netapi_VM_QMemLocalDescRam=NULL;
+unsigned char *netapi_VM_QMemGlobalDescRam=NULL;
+
+/* finally, the SA context area */
+unsigned char *netapi_VM_SaContextVaddr=NULL;
+
+/************************************************
+ * teardown VM memory
+ ***********************************************/
+void netapi_VM_memory_teardown(void)
+{
+ netapi_utilModClose();
+ close(dev_mem_fd);
+#ifndef USE_MODULE_MMAP
+ close(temp_fd);
+#endif
+}
+/*************************************************
+ * setup VM memory
+ ************************************************/
+int netapi_VM_memory_setup(void)
+{
+/* (1) big chunk of memory out of MSMC or DDR via kernel CMA */
+#ifdef NETAPI_USE_DDR
+ if (netapi_VM_memAllocInit( NULL, 0) == nwal_FALSE) {
+ printf(">netapi ERROR: netapi_VM_memAllocInit from DDR/CMA failed\n");
+ return (-1);
+ }
+#else //uncached MSMC
+ if (netapi_VM_memAllocInit((uint8_t*)MSMC_SRAM_BASE_ADDR,
+ NETAPI_PERM_MEM_SZ) == nwal_FALSE) {
+ printf(">netapi ERROR: netapi_VM_memAllocInit from MSMC failed\n");
+ return (-1);
+ }
+#endif
+
+ /* (2) Create virtual memory maps for peripherals */
+ /* (2a) QMSS CFG Regs */
+ netapi_VM_qmssCfgVaddr = netapi_VM_memMap((void*)QMSS_CFG_BASE_ADDR,
+ QMSS_CFG_BLK_SZ);
+ if (!netapi_VM_qmssCfgVaddr)
+ {
+ printf(">netapi ERROR: Failed to map QMSS CFG registers\n");
+ return (-1);
+ }
+ printf(">netapi QMSS_CFG_BASE_ADDR:0x%x Memory mapped at address %p.\n",(void*)QMSS_CFG_BASE_ADDR, netapi_VM_qmssCfgVaddr);
+
+ /* (2b) QMSS DATA Regs */
+#ifdef USE_MODULE_MMAP
+ netapi_VM_qmssDataVaddr = (void *) netapi_utilGetVaOfBufferArea(NETAPIMOD_MMAP_QM_DATA_REG_MEM_OFFSET, QMSS_DATA_BLK_SZ);
+#else
+ netapi_VM_qmssDataVaddr = netapi_VM_memMap((void*)QMSS_DATA_BASE_ADDR,
+ QMSS_DATA_BLK_SZ);
+#endif
+
+ if (!netapi_VM_qmssDataVaddr)
+ {
+ printf(">netapi ERROR: Failed to map QMSS DATA registers\n");
+ return (-1);
+ }
+ printf(">netapi QMSS_DATA_BASE_ADDR:0x%x Memory mapped at address %p.\n",(void*)QMSS_DATA_BASE_ADDR, netapi_VM_qmssDataVaddr);
+
+ /* (2c) SRIO CFG Regs */
+ netapi_VM_srioCfgVaddr = netapi_VM_memMap((void*)SRIO_CFG_BASE_ADDR,
+ SRIO_CFG_BLK_SZ);
+ if (!netapi_VM_srioCfgVaddr)
+ {
+ printf(">netapi ERROR: Failed to map SRIO CFG registers\n");
+ return (-1);
+ }
+ printf(">netapi SRIO_CFG_BASE_ADDR:0x%x Memory mapped at address %p.\n",(void*)SRIO_CFG_BASE_ADDR, netapi_VM_srioCfgVaddr);
+
+ /* (2d) PASS CFG Regs */
+ netapi_VM_passCfgVaddr = netapi_VM_memMap((void*)PASS_CFG_BASE_ADDR,
+ PASS_CFG_BLK_SZ);
+ if (!netapi_VM_passCfgVaddr)
+ {
+ printf(">netapi ERROR: Failed to map PASS CFG registers\n");
+ return (-1);
+ }
+ printf(">netapi PASS_CFG_BASE_ADDR:0x%x Memory mapped at address %p.\n",(void*)PASS_CFG_BASE_ADDR, netapi_VM_passCfgVaddr);
+
+ /* (2e) SA Context area */
+#ifdef NETAPI_ENABLE_SECURITY
+#define SEC_CONTEXT_SZ 384 //not tunable
+/* allocate 2x number of tunnels since we need one for inflow and one for data mode */
+ netapi_VM_SaContextVaddr = netapi_VM_memAlloc((TUNE_NETAPI_MAX_NUM_IPSEC_CHANNELS*2 *
+ SEC_CONTEXT_SZ),
+ 128);
+ if (!netapi_VM_SaContextVaddr)
+ {
+ printf(">netapi ERROR: Failed to map SA context memory region\n");
+ return (-1);
+ }
+ printf(">netapi VM_SaContext: Memory mapped/allocated at address %p.\n", netapi_VM_SaContextVaddr);
+
+#else
+ netapi_VM_SaContextVaddr= (char *) NULL;
+#endif
+
+ /* (2f) Timer */
+ t64_memmap(dev_mem_fd);
+
+ /* (3) Allocate 2 QM regions from the contiguous chunk above */
+ netapi_VM_QMemGlobalDescRam = (void *)netapi_VM_memAlloc((TUNE_NETAPI_NUM_GLOBAL_DESC *
+ TUNE_NETAPI_DESC_SIZE),
+ 128);
+ netapi_VM_QMemLocalDescRam = (void *)netapi_VM_memAlloc((TUNE_NETAPI_NUM_LOCAL_DESC *
+ TUNE_NETAPI_DESC_SIZE),
+ 128);
+ printf(">netapi local desc region=%x global desc region=%x\n", netapi_VM_QMemLocalDescRam,netapi_VM_QMemGlobalDescRam);
+
+ return 1;
+
+}
+
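A hedged sketch (not taken from the source tree) of the call order an application would use around the routines in this file: set up the pool and peripheral mappings once at start-up, run the transport, then tear down.

    extern int  netapi_VM_memory_setup(void);     /* defined in netapi_vm.c above */
    extern void netapi_VM_memory_teardown(void);  /* defined in netapi_vm.c above */

    static int example_bringup(void)
    {
        if (netapi_VM_memory_setup() < 0)         /* maps QMSS/SRIO/PASS, carves descriptor RAM */
            return -1;

        /* ... run the transport: pktio, netcp_cfg, scheduler loop ... */

        netapi_VM_memory_teardown();              /* closes /dev/mem and the netapi module fd   */
        return 0;
    }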
index 851894be0977e88d87ea527ea6b647b10ac445c3..a9b181aaa143136349c89ac970d311b48ced6fe8 100755 (executable)
-/************************************************\r
- * FILE: netapi_vm.h\r
- * PURPOSE: netapi [virtual] memory management\r
- ************************************************/\r
-#ifndef __NETAPI_VM_H__\r
-#define __NETAPI_VM_H__\r
-#include "netapi_tune.h"\r
-#include "ti/drv/nwal/nwal.h"\r
-\r
-/* Function to initialize memory allocator */\r
-nwal_Bool_t netapi_VM_memAllocInit\r
-(\r
- uint8_t *addr, /* Physical address */\r
- uint32_t size /* Size of block */\r
-);\r
-\r
-/* Function to allocate memory */\r
-void* netapi_VM_memAlloc\r
-(\r
- uint32_t size, /* Size of block needed */\r
- uint32_t align /* Alignment of the block needed */\r
-);\r
-\r
-\r
-/* Function to map the give physical address to virtual memory space */\r
-void *netapi_VM_memMap\r
-(\r
- void *addr, /* Physical address */\r
- uint32_t size /* Size of block */\r
-);\r
-\r
-//todo: put in netapi global region somewhere\r
-\r
-/* Global variables to hold virtual address of various hw subsystems */\r
-extern void *netapi_VM_qmssCfgVaddr;\r
-extern void *netapi_VM_qmssDataVaddr;\r
-extern void *netapi_VM_srioCfgVaddr;\r
-extern void *netapi_VM_passCfgVaddr;\r
-\r
-/* Physical address of the [only] memory pool */\r
-extern uint8_t *netapi_VM_mem_start_phy;\r
-extern uint8_t *netapi_VM_mem_end_phy;\r
-\r
-/* virtual address of the [only] memory pool */\r
-extern uint8_t *netapi_VM_mem_start;\r
-extern uint8_t *netapi_VM_mem_end;\r
-\r
-//qm regions: netapi defines two regions, 0,1\r
-extern unsigned char *netapi_VM_QMemLocalDescRam;\r
-extern unsigned char *netapi_VM_QMemGlobalDescRam;\r
-\r
-//SA context region\r
-extern unsigned char *netapi_VM_SaContextVaddr;\r
-\r
-\r
-#endif\r
-\r
+/************************************************
+ * FILE: netapi_vm.h
+ * PURPOSE: netapi [virtual] memory management
+ ************************************************/
+#ifndef __NETAPI_VM_H__
+#define __NETAPI_VM_H__
+#include "netapi_tune.h"
+#include "ti/drv/nwal/nwal.h"
+
+/* Function to initialize memory allocator */
+nwal_Bool_t netapi_VM_memAllocInit
+(
+ uint8_t *addr, /* Physical address */
+ uint32_t size /* Size of block */
+);
+
+/* Function to allocate memory */
+void* netapi_VM_memAlloc
+(
+ uint32_t size, /* Size of block needed */
+ uint32_t align /* Alignment of the block needed */
+);
+
+
+/* Function to map the given physical address into virtual memory space */
+void *netapi_VM_memMap
+(
+ void *addr, /* Physical address */
+ uint32_t size /* Size of block */
+);
+
+//todo: put in netapi global region somewhere
+
+/* Global variables to hold virtual address of various hw subsystems */
+extern void *netapi_VM_qmssCfgVaddr;
+extern void *netapi_VM_qmssDataVaddr;
+extern void *netapi_VM_srioCfgVaddr;
+extern void *netapi_VM_passCfgVaddr;
+
+/* Physical address of the [only] memory pool */
+extern uint8_t *netapi_VM_mem_start_phy;
+extern uint8_t *netapi_VM_mem_end_phy;
+
+/* virtual address of the [only] memory pool */
+extern uint8_t *netapi_VM_mem_start;
+extern uint8_t *netapi_VM_mem_end;
+
+//qm regions: netapi defines two regions, 0,1
+extern unsigned char *netapi_VM_QMemLocalDescRam;
+extern unsigned char *netapi_VM_QMemGlobalDescRam;
+
+//SA context region
+extern unsigned char *netapi_VM_SaContextVaddr;
+
+
+#endif
+
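Because the pool exported by this header is one contiguous mapping, user-space virtual-to-physical translation for buffers carved from it is a constant offset. A sketch using the exported bases (the helper name is illustrative, not part of the API):

    #include <stdint.h>

    extern uint8_t *netapi_VM_mem_start;       /* exported by netapi_vm.h */
    extern uint8_t *netapi_VM_mem_start_phy;   /* exported by netapi_vm.h */

    static inline uint8_t *example_virt2phys(uint8_t *va)
    {
        /* valid only for addresses inside the single contiguous pool */
        return netapi_VM_mem_start_phy + (va - netapi_VM_mem_start);
    }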
index 2ae66eb34b75873a7bc1197b2d3486e9a5981f99..77003686fde0119873ded4b2d3952855df72f050 100755 (executable)
-/**********************************************************\r
- * file: netcp_cfg.c\r
- * purpose: netcp configuration routines\r
- **************************************************************\r
- * FILE: netcp_cfg.c\r
- * \r
- * DESCRIPTION: netcp configuration main source file for user space transport\r
- * library\r
- * \r
- * REVISION HISTORY: rev 0.0.1 \r
- *\r
- * Copyright (c) Texas Instruments Incorporated 2010-2011\r
- * \r
- * Redistribution and use in source and binary forms, with or without \r
- * modification, are permitted provided that the following conditions \r
- * are met:\r
- *\r
- * Redistributions of source code must retain the above copyright \r
- * notice, this list of conditions and the following disclaimer.\r
- *\r
- * Redistributions in binary form must reproduce the above copyright\r
- * notice, this list of conditions and the following disclaimer in the \r
- * documentation and/or other materials provided with the \r
- * distribution.\r
- *\r
- * Neither the name of Texas Instruments Incorporated nor the names of\r
- * its contributors may be used to endorse or promote products derived\r
- * from this software without specific prior written permission.\r
- *\r
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \r
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT \r
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT \r
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, \r
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT \r
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \r
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \r
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
-\r
- ******************************************************/\r
-#include <stdio.h>\r
-#include <stdlib.h>\r
-#include <unistd.h>\r
-#include <string.h>\r
-#include "netapi.h"\r
-#include "netcp_cfg.h"\r
-#include "netapi_loc.h"\r
-\r
-/******************************************************************\r
- ********************Utility*************************************\r
-*******************************************************************/\r
-\r
-\r
-//get a free transaction id\r
-NetapiNwalTransInfo_t * netapip_GetFreeTransInfo(NETAPI_GLOBAL_T *p_global, nwal_TransID_t *pTransId)\r
-{\r
- uint16_t count=0;\r
-\r
- count=0;\r
- while(count < TUNE_NETAPI_MAX_NUM_TRANS)\r
- {\r
- if((p_global->nwal_context.transInfos[count].inUse) != nwal_TRUE)\r
- {\r
- p_global->nwal_context.transInfos[count].inUse = nwal_TRUE;\r
- *pTransId = count;\r
- return(&p_global->nwal_context.transInfos[count]);\r
- }\r
- count++;\r
- }\r
- \r
- /* trouble. need to wait for one to free up*/\r
- /* to do: handle this by forcing a poll of cntrl queue*/\r
- printf(">netcp_cfg: trying to get free transaction slot but all full!!\n");\r
- return NULL;\r
-\r
-}\r
-//internal: build route\r
-void netcp_cfgp_build_route(NETCP_CFG_ROUTE_T * p_route, int16_t * p_flow, Qmss_QueueHnd * p_q)\r
-{\r
- if (!p_route) return;\r
- if (p_route->p_flow) *p_flow= p_route->p_flow->flowid;\r
- else *p_flow = CPPI_PARAM_NOT_SPECIFIED;\r
- if (p_route->p_dest_q) *p_q = pktio_get_q(p_route->p_dest_q);\r
- else *p_q=QMSS_PARAM_NOT_SPECIFIED;\r
-}\r
-/*-----------------------------------------------------------*/\r
-/*----------------database management stuff-------------------*/\r
-/*-----------------------------------------------------------*/\r
-\r
-/*=====================Policies=============================*/\r
-//internal: find a free slot for a policy\r
-int netcp_cfgp_find_policy_slot( NETAPI_NWAL_GLOBAL_CONTEXT_T *p, int tunnel)\r
-{\r
- int i;\r
- if ((tunnel <0 ) || (tunnel >=TUNE_NETAPI_MAX_SA)) return -1;\r
-\r
- //find a free entry\r
- for(i=0;i<TUNE_NETAPI_MAX_POLICY;i++)\r
- {\r
- if (!p->policy[i].in_use)\r
- {\r
- p->policy[i].in_use = 2; //pending\r
- p->policy[i].tunnel= tunnel; //save tunnel this is linked to \r
- return i;\r
- }\r
- }\r
- return -1;\r
-}\r
-\r
-//internal: delete a policy from list \r
-void netcp_cfgp_delete_policy(\r
- NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
- int policy_slot )\r
-{\r
- if ((policy_slot <0 ) || (policy_slot >= TUNE_NETAPI_MAX_POLICY))\r
- {\r
- return ;\r
- }\r
- p->policy[policy_slot].in_use=0;\r
- return;\r
-}\r
-\r
-//internal: insert a policy into the list\r
-void netcp_cfgp_insert_policy(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
- int policy_slot, //we 'reserved it already'\r
- void * handle)\r
-{\r
- p->policy[policy_slot].in_use=1;\r
- p->policy[policy_slot].nwal_handle = handle;\r
- return;\r
-}\r
-\r
-//internal: return nwal_handle for policy \r
-void *netcp_cfgp_get_policy( NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
- int policy_slot)\r
-{\r
- if ((policy_slot <0 ) || (policy_slot >= TUNE_NETAPI_MAX_POLICY)) return NULL;\r
- if (!p->policy[policy_slot].in_use) return NULL;\r
- return p->policy[policy_slot].nwal_handle;\r
-}\r
-\r
-\r
-\r
-/*======================SAs==================================*/\r
-//internal: find a free slot for an SA \r
-int netcp_cfgp_find_sa_slot( NETAPI_NWAL_GLOBAL_CONTEXT_T *p, int iface)\r
-{ \r
- int i;\r
- if (iface != NETCP_CFG_NO_INTERFACE)\r
- {\r
- if ((iface <0 ) || (iface >=TUNE_NETAPI_MAX_INTERFACES)) return -1;\r
- }\r
- //find a free entry\r
- for(i=0;i<TUNE_NETAPI_MAX_SA;i++)\r
- { \r
- if (!p->tunnel[i].in_use)\r
- {\r
- p->tunnel[i].in_use = 2; //pending\r
- p->tunnel[i].iface= iface; //save iface\r
- return i;\r
- }\r
- }\r
- return -1;\r
-}\r
- //internal: delete an SA from the list\r
-void netcp_cfgp_delete_sa(\r
- NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
- int sa_slot )\r
-{\r
- if ((sa_slot <0 ) || (sa_slot >= TUNE_NETAPI_MAX_SA))\r
- {\r
- return ;\r
- }\r
- p->tunnel[sa_slot].in_use=0;\r
- return;\r
-}\r
-\r
-//internal: insert an SA into the list \r
-void netcp_cfgp_insert_sa(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
- int sa_slot, //we 'reserved it already'\r
- int dir,\r
- int mode,\r
- void * temp1,\r
- void * temp2,\r
- void * handle_inflow,\r
- void * handle_sideband)\r
-{\r
- p->tunnel[sa_slot].in_use=1;\r
- p->tunnel[sa_slot].inbound = dir;\r
- p->tunnel[sa_slot].sa_mode = mode;\r
- p->tunnel[sa_slot].sa_handle_inflow = handle_inflow;\r
- p->tunnel[sa_slot].sa_handle_sideband = handle_sideband;\r
- return;\r
-}\r
-\r
-//internal: return nwal_handles for SA \r
-void *netcp_cfgp_get_sa_handles( NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
- int sa_slot, void ** p_sideband)\r
-{\r
- if ((sa_slot <0 ) || (sa_slot >= TUNE_NETAPI_MAX_SA)) return NULL;\r
- if (!p->tunnel[sa_slot].in_use) return NULL;\r
- *p_sideband = p->tunnel[sa_slot].sa_handle_sideband;\r
- return p->tunnel[sa_slot].sa_handle_inflow;\r
-}\r
-\r
-\r
-/*==============================FLOWS=============================*/\r
-//internal: find a free slot for a flow \r
-static int netcp_cfgp_find_flow_slot( NETAPI_NWAL_GLOBAL_CONTEXT_T *p)\r
-{\r
- int i;\r
- //find a free entry\r
- for(i=0;i<TUNE_NETAPI_MAX_FLOWS;i++)\r
- {\r
- if (!p->flows[i].in_use)\r
- {\r
- p->flows[i].in_use = 2; //pending\r
- return i;\r
- }\r
- }\r
- return -1;\r
-}\r
-\r
-//internal: clear flow slot \r
-static void netcp_cfgp_delete_flow(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,int slot)\r
-{\r
- if ((slot >=0 ) && (slot < TUNE_NETAPI_MAX_FLOWS))\r
- {\r
- p->flows[slot].in_use = 0;\r
- }\r
-}\r
-\r
-//internal: insert a flow into flow slot\r
-static NETCP_CFG_FLOW_HANDLE_T netcp_cfgp_insert_flow(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
- int slot, //we 'reserved it already'\r
- void * handle) //cppi flow handle. Save this for delete\r
-{\r
- p->flows[slot].in_use=1;\r
- p->flows[slot].handle = handle;\r
- p->flows[slot].flow.flowid = Cppi_getFlowId(handle);\r
- return (NETCP_CFG_FLOW_HANDLE_T) &p->flows[slot].flow;\r
-}\r
-\r
-//find entry matching the flowid. return slot# and the cppi handle\r
-static int netcp_cfgp_find_flow(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
- int flowid, \r
- void ** handle) \r
-{\r
-int i;\r
- *handle=NULL;\r
- for(i=0;i<TUNE_NETAPI_MAX_FLOWS;i++)\r
- {\r
- if ((p->flows[i].in_use)&&(p->flows[i].flow.flowid == flowid))\r
- {\r
- *handle = p->flows[i].handle;\r
- return i;\r
- }\r
- }\r
- return -1;\r
-}\r
-\r
-\r
-/*============================IP ADDRESSES==========================*/\r
-\r
-//internal: find a free slot for IP rule \r
-static int netcp_cfgp_find_ip_slot(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
- int iface_no)\r
-{\r
- int i;\r
-\r
- //find a free entry\r
- for(i=0;i<TUNE_NETAPI_MAX_IP;i++)\r
- {\r
- if (!p->ips[i].in_use)\r
- {\r
- p->ips[i].in_use = 2; //pending\r
- return i;\r
- }\r
- }\r
- return -1;\r
-}\r
-\r
-\r
-//internal: insert an IP address into iface\r
-static void netcp_cfgp_insert_ip(NETAPI_NWAL_GLOBAL_CONTEXT_T *p, \r
- nwal_IpType ipType,\r
- nwalIpAddr_t *ip_addr, \r
- nwalIpOpt_t *ip_qualifiers, \r
- int iface_no,\r
- int ip_slot, //we 'reserved it already'\r
- void * handle)\r
-{\r
- p->ips[ip_slot].in_use=1;\r
- memcpy(&p->ips[ip_slot].ip_addr, ip_addr, sizeof(nwalIpAddr_t));\r
- if(ip_qualifiers)\r
- memcpy(&p->ips[ip_slot].ip_qualifiers, ip_qualifiers, sizeof(nwalIpOpt_t));\r
- else\r
- memset(&p->ips[ip_slot].ip_qualifiers, 0, sizeof(nwalIpOpt_t));\r
- p->ips[ip_slot].ip_type = ipType;\r
- p->ips[ip_slot].nwal_handle = handle;\r
- return;\r
-}\r
-\r
-\r
-//internal: free IP slot associated with ip address \r
-static void netcp_cfgp_delete_ip(\r
- NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
- int iface_no,\r
- int ip_slot )\r
-{\r
- if ((ip_slot <0)||(ip_slot>=TUNE_NETAPI_MAX_IP)) return ;\r
- p->ips[ip_slot].in_use=0;\r
- return;\r
-}\r
-\r
-\r
-//internal: get IP handle associated with ip address \r
-static void *netcp_cfgp_get_ip_handle(\r
- NETAPI_NWAL_GLOBAL_CONTEXT_T *p, \r
- int iface_no,\r
- int ip_slot )\r
-{\r
- if ((ip_slot <0)||(ip_slot>=TUNE_NETAPI_MAX_IP)) return NULL;\r
- if (!p->ips[ip_slot].in_use) return NULL;\r
- return (void *) p->ips[ip_slot].nwal_handle;\r
-}\r
-\r
-/*==========================MAC INTERFACES======================*/\r
-//internal: insert interface info into global context\r
-static void netcp_cfgp_insert_mac(NETAPI_NWAL_GLOBAL_CONTEXT_T *p, unsigned char * p_mac,\r
- int iface_no, int state, NETCP_CFG_VLAN_T vlan, void * handle)\r
-{\r
- if ((iface_no >=0 ) && (iface_no < TUNE_NETAPI_MAX_INTERFACES))\r
- {\r
- memset(&p->interfaces[iface_no],0,sizeof(NETCP_INTERFACE_T));\r
- p->interfaces[iface_no].in_use = 1;\r
- memcpy(&p->interfaces[iface_no].mac[0], p_mac,6);\r
- p->interfaces[iface_no].state = state;\r
- //todo p->interfaces[iface_no].vlan = vlan;\r
- p->interfaces[iface_no].nwal_handle = handle; //save handle associated with this rule\r
- }\r
- else printf(">netcp_cfg insert interface # out of range %d\n",iface_no);\r
-\r
-}\r
-\r
-//internal: get handle associated with interface\r
-void* netcp_cfgp_get_mac_handle(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,int iface_no)\r
-{\r
- if (iface_no == NETCP_CFG_NO_INTERFACE) return NULL;\r
- if ((iface_no <0 ) || (iface_no >= TUNE_NETAPI_MAX_INTERFACES))\r
- {\r
- return NULL;\r
- }\r
- else if ( p->interfaces[iface_no].in_use)\r
- {\r
- return (void *) p->interfaces[iface_no].nwal_handle;\r
- }\r
- //no valid entry in slot\r
- return NULL;\r
-}\r
-//internal: clear interface entry\r
-static void netcp_cfgp_delete_mac(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,int iface_no)\r
-{\r
- if ((iface_no >=0 ) && (iface_no < TUNE_NETAPI_MAX_INTERFACES))\r
- {\r
- p->interfaces[iface_no].in_use = 0;\r
- }\r
-}\r
-\r
-\r
-/*========================CLASSIFIERS==========================*/\r
-//internal: find a free slot for classifier rule\r
-static int netcp_cfgp_find_class_slot( NETAPI_NWAL_GLOBAL_CONTEXT_T *p)\r
-{\r
- int i;\r
- //find a free entry\r
- for(i=0;i<TUNE_NETAPI_MAX_CLASSIFIERS;i++)\r
- {\r
- if (!p->classi[i].in_use)\r
- {\r
- p->classi[i].in_use = 2; //pending\r
- return i;\r
- }\r
- }\r
- return -1;\r
-}\r
-\r
- //internal: delete a classifier from the list\r
-static void netcp_cfgp_delete_class(\r
- NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
- int class_slot )\r
-{\r
- if ((class_slot <0 ) || (class_slot >= TUNE_NETAPI_MAX_CLASSIFIERS))\r
- {\r
- return ;\r
- }\r
- p->classi[class_slot].in_use=0;\r
- return;\r
-}\r
-\r
-//internal: insert a classifier into list \r
-static void netcp_cfgp_insert_class(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
- int class_slot, //we 'reserved it already'\r
- int class_type,\r
- void * L2_handle,\r
- void * L3_handle,\r
- void * L4_handle)\r
-{\r
- p->classi[class_slot].in_use=1;\r
- p->classi[class_slot].nwal_L2_handle = L2_handle;\r
- p->classi[class_slot].nwal_L3_handle = L3_handle;\r
- p->classi[class_slot].nwal_L4_handle = L4_handle;\r
- p->classi[class_slot].class_type = class_type;\r
- return;\r
-}\r
-\r
-//internal: return L4 nwal_handle for class\r
-static void *netcp_cfgp_get_l4_handle( NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
- int class_slot)\r
-{\r
- if ((class_slot <0 ) || (class_slot >= TUNE_NETAPI_MAX_CLASSIFIERS)) return NULL;\r
- if (!p->classi[class_slot].in_use) return NULL;\r
- return p->classi[class_slot].nwal_L4_handle;\r
-}\r
-\r
-//internal: return L3 nwal_handle for class\r
-static void *netcp_cfgp_get_l3_handle( NETAPI_NWAL_GLOBAL_CONTEXT_T *p,\r
- int class_slot)\r
-{\r
- if ((class_slot <0 ) || (class_slot >= TUNE_NETAPI_MAX_CLASSIFIERS)) return NULL;\r
- if (!p->classi[class_slot].in_use) return NULL;\r
- return p->classi[class_slot].nwal_L3_handle;\r
-}\r
-\r
-\r
-/***********************************************************************************/\r
-/****************************************API****************************************/\r
-/***********************************************************************************/\r
-\r
-\r
-/*****************************************************************\r
- * Query Stats\r
- ****************************************************************/\r
-void netcp_cfgReqStats(NETAPI_T h, NETCP_CFG_STATS_CB cb, int doClear, int *err) \r
-{\r
-nwal_RetValue ret;\r
-NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;\r
-NetapiNwalTransInfo_t *pTransInfo;\r
-nwal_TransID_t transId;\r
-if ((!n) || (!cb)) {*err = NETAPI_ERR_BAD_INPUT; return ;}\r
-*err =0;\r
-\r
-\r
-pTransInfo = netapip_GetFreeTransInfo((NETAPI_GLOBAL_T *) n->global, &transId);\r
-if (!pTransInfo) { *err = NETAPI_ERR_BUSY; return ;}\r
-pTransInfo->transType = NETAPI_NWAL_HANDLE_STAT_REQUEST;\r
-pTransInfo->netapi_handle = h;\r
-n->nwal_local.stats_cb = cb;\r
-ret = nwal_getPAStats( ((NETAPI_GLOBAL_T *) n->global)->nwal_context.nwalInstHandle,\r
- transId,\r
- NULL,\r
- doClear);\r
-if(ret != nwal_OK)\r
-{\r
- pTransInfo->inUse = nwal_FALSE;\r
- *err = NETAPI_ERR_BUSY; //no resources??\r
- printf("> netcp_cfg reqStats failed, err=%d\n",ret);\r
-}\r
-\r
-}\r
-/*****************************************************************\r
- * CREATE A MAC INTERFACE\r
- ****************************************************************/\r
-NETCP_CFG_MACIF_T netcp_cfgCreateMacInterface(\r
- NETAPI_T h,\r
- uint8_t *p_mac,\r
- int iface_no, \r
- int switch_port, \r
- NETCP_CFG_ROUTE_HANDLE_T route,\r
- NETCP_CFG_VLAN_T vlan, //future\r
- int state, //0=down, 1=up //ignored\r
- int * err\r
- )\r
-{\r
-NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;\r
-nwalMacParam_t MacInfo= {\r
- 0, /* validParams */\r
- 0, /* ifNum */\r
- 0, /* vlanId */\r
- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, /* Local mac */\r
- NWAL_MATCH_ACTION_CONTINUE_NEXT_ROUTE, /* Continue parsing to next route for match */\r
- NWAL_NEXT_ROUTE_FAIL_ACTION_HOST, /* For next route fail action by default is route to host */\r
- CPPI_PARAM_NOT_SPECIFIED, /* Use default flow configured to NWAL if packet is routed to host */\r
- QMSS_PARAM_NOT_SPECIFIED /* Use default queue configured to NWAL if packet is routed to host */\r
-};\r
-\r
-nwal_RetValue retValue;\r
-NetapiNwalTransInfo_t *pTransInfo;\r
-nwal_TransID_t trans_id;\r
-\r
- if ((!n) || (!p_mac)) {*err = NETAPI_ERR_BAD_INPUT; return -1;}\r
- *err =0;\r
-\r
- pTransInfo = netapip_GetFreeTransInfo((NETAPI_GLOBAL_T *) n->global, &trans_id);\r
- if (!pTransInfo) { *err = NETAPI_ERR_BUSY; return -1;}\r
- pTransInfo->transType = NETAPI_NWAL_HANDLE_TRANS_MAC;\r
- pTransInfo->netapi_handle = h; \r
-\r
- /* set up MacInfo */\r
- memcpy(&MacInfo.macAddr,p_mac,6); \r
- /* todo: vlan */\r
- MacInfo.ifNum = switch_port; /* todo: check for 0/1 relative*/\r
-\r
- if (route != NULL)\r
- {\r
- netcp_cfgp_build_route(route,&MacInfo.appRxPktFlowId, &MacInfo.appRxPktQueue);\r
- }\r
- pTransInfo->state = NETAPI_NWAL_HANDLE_STATE_OPEN_PENDING;\r
- retValue = nwal_setMacIface( ((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,\r
- trans_id,\r
- (nwal_AppId) (NETAPI_NETCP_MATCH_GENERIC_MAC | iface_no),\r
- &MacInfo,\r
- &pTransInfo->handle);\r
- if(retValue != nwal_OK)\r
- {\r
- *err = NETAPI_ERR_NWAL_ERR0;\r
- printf (">netcp cfg - ERROR: nwal_setMacIface returned Error Code %d\n",\r
- retValue);\r
- pTransInfo->inUse = nwal_FALSE;\r
- return -1;\r
- }\r
- //pTransInfo->inUse = nwal_FALSE;\r
-\r
- //wait here until its done since scheduler isn't running yet most likely..\r
- // todo: make this handled by scheduler poll later ??\r
- if(trans_id != NWAL_TRANSID_SPIN_WAIT)\r
- {\r
- n->nwal_local.numPendingCfg++;\r
- while ((volatile) n->nwal_local.numPendingCfg)\r
- {\r
-           // if the response is there, this poll lands in the Ctl poll callback,\r
-           // which handles the rest (including decrementing #pending)\r
- nwal_pollCtl(((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,NULL,NULL);\r
- }\r
- }\r
- printf (">netcp cfg: MAC i/f %d added\n", iface_no);\r
- netcp_cfgp_insert_mac(&netapi_get_global()->nwal_context, \r
- p_mac, iface_no, state,vlan,\r
- (void *) pTransInfo->handle);\r
- pTransInfo->state = NETAPI_NWAL_HANDLE_STATE_IDLE;\r
- pTransInfo->inUse = nwal_FALSE;\r
- return (NETAPI_NETCP_MATCH_GENERIC_MAC | iface_no);\r
-}\r
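A hedged usage sketch of the call above: bring up MAC interface 0 with the default NWAL route. The NETAPI_T instance handle, MAC address, and switch port are illustrative values supplied by the application.

    #include <stdint.h>
    #include "netapi.h"
    #include "netcp_cfg.h"

    static NETCP_CFG_MACIF_T example_add_mac(NETAPI_T netapi_handle)
    {
        uint8_t mac[6] = {0x00, 0x17, 0xea, 0x00, 0x00, 0x01};   /* illustrative MAC */
        int err = 0;

        NETCP_CFG_MACIF_T mif = netcp_cfgCreateMacInterface(
                netapi_handle, mac,
                0,      /* iface_no                 */
                1,      /* switch_port              */
                NULL,   /* route: use NWAL defaults */
                0,      /* vlan (future)            */
                1,      /* state: up                */
                &err);

        if (err) return -1;
        return mif;     /* tear down later with netcp_cfgDelMac(netapi_handle, 0, &err) */
    }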
-\r
-\r
-/*****************************************************************/\r
-/***************Delete a mac interface****************************/\r
-/*****************************************************************/\r
-void netcp_cfgDelMac(NETAPI_T h,int iface_no, int *err)\r
-{\r
- nwal_RetValue ret;\r
- NetapiNwalTransInfo_t *pTransInfo;\r
- nwal_TransID_t trans_id;\r
- NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;\r
- void * ifHandle;\r
-\r
- //get the nwal handle assoicated with this iface\r
- ifHandle = netcp_cfgp_get_mac_handle(&netapi_get_global()->nwal_context, iface_no );\r
- if(!ifHandle) \r
- {*err = NETAPI_ERR_BAD_INPUT; return ;}\r
- *err =0;\r
- \r
- //get a transaction id\r
- pTransInfo = netapip_GetFreeTransInfo((NETAPI_GLOBAL_T *) n->global, &trans_id);\r
- if (!pTransInfo) { *err = NETAPI_ERR_BUSY; return ;}\r
- pTransInfo->transType = NETAPI_NWAL_HANDLE_TRANS_MAC;\r
- pTransInfo->netapi_handle = h;\r
- //issue request\r
- ret = nwal_delMacIface(\r
- ((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,\r
- trans_id,\r
- ifHandle);\r
- if(ret != nwal_OK)\r
- {\r
- *err = NETAPI_ERR_NWAL_ERR0;\r
- printf (">netcp cfg - ERROR: nwal_delMacIface returned Error Code %d\n",\r
- ret);\r
- pTransInfo->inUse = nwal_FALSE;\r
- return ;\r
- }\r
- //wait here until its done since scheduler isn't running yet most likely..\r
- // todo: make this handled by scheduler poll later ??\r
- if(trans_id != NWAL_TRANSID_SPIN_WAIT)\r
- {\r
- n->nwal_local.numPendingCfg++;\r
- while ((volatile) n->nwal_local.numPendingCfg)\r
- {\r
-           // if the response is there, this poll lands in the Ctl poll callback,\r
-           // which handles the rest (including decrementing #pending)\r
- nwal_pollCtl(((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,NULL,NULL);\r
- }\r
- }\r
- printf (">netcp cfg: MAC i/f %d deleted\n",iface_no);\r
- pTransInfo->state = NETAPI_NWAL_HANDLE_STATE_IDLE;\r
- pTransInfo->inUse = nwal_FALSE;\r
- //zap the entry\r
- netcp_cfgp_delete_mac(&netapi_get_global()->nwal_context, iface_no);\r
- return ;\r
-}\r
-\r
-\r
-/*****************************************************************/\r
-/***************Add IP to MAC interface (internal)****************/\r
-/*****************************************************************/\r
-static NETCP_CFG_IP_T netcp_cfgAddIpInternal(\r
- NETAPI_T h,\r
- int iface_no,\r
- nwal_IpType ipType,\r
- nwalIpAddr_t * ip_addr,\r
- nwalIpOpt_t * ip_qualifiers,\r
- NETCP_CFG_ROUTE_HANDLE_T route, //NULL for default\r
- int * err,\r
- int flag) //TRUE: add IP to iface. False: add IP as part of classifier\r
-{\r
-NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;\r
-void * n_handle=NULL;\r
-nwalIpParam_t nwalIpParam= {\r
- pa_IPV4, /* IP Type */\r
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* Dest IP */\r
- { 0x0,0,0,0},/* IP Options */\r
- NWAL_MATCH_ACTION_CONTINUE_NEXT_ROUTE, /* Continue parsing to next route for match */\r
- NWAL_NEXT_ROUTE_FAIL_ACTION_HOST, /* For next route fail action by default is route to host */\r
- CPPI_PARAM_NOT_SPECIFIED, /* Use default flow configured to NWAL if packet is routed to host */\r
- QMSS_PARAM_NOT_SPECIFIED /* Use default queue configured to NWAL if packet is routed to host */\r
-};\r
-nwal_RetValue retValue;\r
-NetapiNwalTransInfo_t *pTransInfo;\r
-nwal_TransID_t trans_id;\r
-int ip_slot=-1;\r
-NETCP_CFG_IP_T ip_rule_id;\r
-NETCP_CFG_IP_T temp;\r
-\r
- //verify that iface has been configured \r
- if (iface_no != NETCP_CFG_NO_INTERFACE)\r
- {\r
- if ((iface_no<0) || (iface_no>= TUNE_NETAPI_MAX_INTERFACES)) {*err = NETAPI_ERR_BAD_INPUT; return -1;}\r
- }\r
-\r
- if (iface_no != NETCP_CFG_NO_INTERFACE)\r
- {\r
- if(netapi_get_global()->nwal_context.interfaces[iface_no].in_use)\r
- {\r
- n_handle = netapi_get_global()->nwal_context.interfaces[iface_no].nwal_handle;\r
- }\r
- else\r
- {\r
- *err = NETAPI_ERR_BAD_INPUT;\r
- return -1;\r
- }\r
- }\r
- if (flag) //if adding IP to MAC then reserve a slot to save info\r
- {\r
- //find free slot for IP & reserve\r
- ip_slot= netcp_cfgp_find_ip_slot(&netapi_get_global()->nwal_context, \r
- iface_no);\r
- if (ip_slot <0) \r
- {\r
- *err= NETAPI_ERR_NOMEM; //no room \r
- return -1;\r
- }\r
- }\r
-\r
- //get a transaction object for config action\r
- pTransInfo = netapip_GetFreeTransInfo((NETAPI_GLOBAL_T *) n->global, &trans_id);\r
- if (!pTransInfo) { *err = NETAPI_ERR_BUSY; return -1;}\r
- pTransInfo->transType = NETAPI_NWAL_HANDLE_TRANS_IP;\r
- pTransInfo->netapi_handle = h;\r
-\r
- //build nwalIpParam\r
- memcpy(&nwalIpParam.locIpAddr,ip_addr, sizeof(nwalIpAddr_t));\r
- nwalIpParam.ipType=ipType;\r
- if(route)\r
- {\r
- netcp_cfgp_build_route(route,&nwalIpParam.appRxPktFlowId, &nwalIpParam.appRxPktQueue);\r
- } \r
- else{} //use nwal defaults\r
- if (ip_qualifiers)\r
- memcpy(&nwalIpParam.ipOpt,ip_qualifiers, sizeof(nwalIpOpt_t)); \r
- else\r
- memset(&nwalIpParam.ipOpt,0, sizeof(nwalIpOpt_t));\r
-\r
- //build the rule id that will be returned when a packet matches \r
- if (flag)\r
-   ip_rule_id = NETAPI_NETCP_MATCH_GENERIC_IP | iface_no | ((ip_slot&0xff)<<8);\r
- else\r
- ip_rule_id = (NETAPI_NETCP_MATCH_CLASS_L3 | iface_no);\r
-\r
- //perform config action\r
- pTransInfo->state = NETAPI_NWAL_HANDLE_STATE_OPEN_PENDING;\r
- retValue = nwal_setIPAddr( netapi_get_global()->nwal_context.nwalInstHandle,\r
- trans_id,\r
- (nwal_AppId) (ip_rule_id),\r
- n_handle,\r
- &nwalIpParam,\r
- &pTransInfo->handle);\r
-\r
- if(retValue != nwal_OK)\r
- {\r
- *err = NETAPI_ERR_NWAL_ERR0;\r
- printf (">netcp cfg: nwal_setIP returned Error Code %d\n",\r
- retValue);\r
- pTransInfo->inUse = nwal_FALSE;\r
- //zap the entry\r
- if (flag)\r
- {\r
- netcp_cfgp_delete_ip(&netapi_get_global()->nwal_context,\r
- iface_no,\r
- ip_slot);\r
- }\r
- return -1;\r
- }\r
- //wait here until its done since scheduler isn't running yet most likely..\r
- // todo: make this handled by scheduler poll later ??\r
- if(trans_id != NWAL_TRANSID_SPIN_WAIT)\r
- {\r
- n->nwal_local.numPendingCfg++;\r
- while ((volatile) n->nwal_local.numPendingCfg)\r
- {\r
-           // if the response is there, this poll lands in the Ctl poll callback,\r
-           // which handles the rest (including decrementing #pending)\r
- nwal_pollCtl(((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,NULL,NULL);\r
- }\r
- }\r
- if (flag)\r
- {\r
- printf (">netcp cfg: IP added to interface %d (slot%d)\n", iface_no, ip_slot);\r
- netcp_cfgp_insert_ip(&netapi_get_global()->nwal_context, ipType, \r
- ip_addr, ip_qualifiers, iface_no, ip_slot,\r
- pTransInfo->handle);\r
- }\r
- pTransInfo->state = NETAPI_NWAL_HANDLE_STATE_IDLE;\r
- temp = (NETCP_CFG_IP_T) pTransInfo->handle;\r
- pTransInfo->inUse = nwal_FALSE;\r
- return (flag ? ip_rule_id: temp);\r
-}\r
-/*****************************************************************/\r
-/***************Add IP to MAC interface **************************/\r
-/*****************************************************************/\r
-NETCP_CFG_IP_T netcp_cfgAddIp(\r
- NETAPI_T h,\r
- int iface_no,\r
- nwal_IpType ipType,\r
- nwalIpAddr_t * ip_addr,\r
- nwalIpOpt_t * ip_qualifiers,\r
- NETCP_CFG_ROUTE_HANDLE_T route, //NULL for default\r
- int * err\r
- )\r
-{\r
- return netcp_cfgAddIpInternal(\r
- h, iface_no, ipType, ip_addr, ip_qualifiers, route, err, \r
- 1);\r
-}\r
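A hedged usage sketch of netcp_cfgAddIp: attach an IPv4 address to interface 0 with no qualifiers and the default route. The raw-byte copy assumes the IPv4 address occupies the first four bytes of nwalIpAddr_t, and the address itself is illustrative.

    #include <stdint.h>
    #include <string.h>
    #include "netapi.h"
    #include "netcp_cfg.h"

    static NETCP_CFG_IP_T example_add_ip(NETAPI_T netapi_handle)
    {
        uint8_t ipv4[4] = {10, 0, 0, 2};     /* illustrative address                       */
        nwalIpAddr_t ip;
        int err = 0;

        memset(&ip, 0, sizeof(ip));
        memcpy(&ip, ipv4, sizeof(ipv4));     /* assumption: IPv4 sits in the first 4 bytes */

        return netcp_cfgAddIp(netapi_handle,
                              0,             /* iface_no                                 */
                              pa_IPV4,       /* ipType, as used in the templates above   */
                              &ip,
                              NULL,          /* ip_qualifiers: none                      */
                              NULL,          /* route: use NWAL defaults                 */
                              &err);
    }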
-\r
-/*****************************************************************/\r
-/***************Delete an attached IP*****************************/\r
-/*****************************************************************/\r
-static void netcp_cfgDelIpInternal(NETAPI_T h, int iface_no, nwal_IpType ipType,\r
- nwalIpAddr_t * ip_addr,\r
- nwalIpOpt_t * ip_qualifiers, \r
- NETCP_CFG_IP_T ip_rule_id,\r
- int *err, \r
- void * handle, /* if flag==0, handle must be valid */\r
- int flag) /* flag==0 => delete IP rule that was part of classifier, not interface */\r
-{\r
- nwal_RetValue ret;\r
- NetapiNwalTransInfo_t *pTransInfo;\r
- nwal_TransID_t trans_id;\r
- NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;\r
- void * ifHandle;\r
- int ip_slot = (ip_rule_id>>8)&0xff;\r
-\r
- //get the nwal handle assoicated with this ip \r
- if (flag)\r
- {\r
- ifHandle = netcp_cfgp_get_ip_handle(\r
- &netapi_get_global()->nwal_context, iface_no,\r
- ip_slot );\r
- }\r
- else \r
- {\r
- ifHandle = handle;\r
- }\r
- if(!ifHandle)\r
- {*err = NETAPI_ERR_BAD_INPUT; return ;}\r
- *err =0;\r
-\r
- //get a transaction id\r
- pTransInfo = netapip_GetFreeTransInfo((NETAPI_GLOBAL_T *) n->global, &trans_id);\r
- if (!pTransInfo) { *err = NETAPI_ERR_BUSY; return ;}\r
- pTransInfo->transType = NETAPI_NWAL_HANDLE_TRANS_IP;\r
- pTransInfo->netapi_handle = h;\r
- //issue request\r
- ret = nwal_delIPAddr(\r
- ((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,\r
- trans_id,\r
- ifHandle);\r
- if(ret != nwal_OK)\r
- {\r
- *err = NETAPI_ERR_NWAL_ERR0;\r
- printf (">netcp cfg - ERROR: nwal_delMacIface returned Error Code %d\n",\r
- ret);\r
- pTransInfo->inUse = nwal_FALSE;\r
- return ;\r
- }\r
- //wait here until its done since scheduler isn't running yet most likely..\r
- // todo: make this handled by scheduler poll later ??\r
- if(trans_id != NWAL_TRANSID_SPIN_WAIT)\r
- {\r
- n->nwal_local.numPendingCfg++;\r
- while ((volatile) n->nwal_local.numPendingCfg)\r
- {\r
-           // if the response is there, this poll lands in the Ctl poll callback,\r
-           // which handles the rest (including decrementing #pending)\r
- nwal_pollCtl(((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,NULL,NULL);\r
- }\r
- }\r
- if (flag)\r
- printf (">netcp cfg: attached IP deleted\n");\r
- else\r
- printf (">netcp cfg: Classifier IP rule deleted\n");\r
- pTransInfo->state = NETAPI_NWAL_HANDLE_STATE_IDLE;\r
- pTransInfo->inUse = nwal_FALSE;\r
-\r
- //zap the entry\r
- if (flag)\r
- netcp_cfgp_delete_ip(&netapi_get_global()->nwal_context, \r
- iface_no,\r
- ip_slot);\r
- return ;\r
-}\r
-\r
-/*****************************************************************/\r
-/***************Delete an attached IP*****************************/\r
-/*****************************************************************/\r
-void netcp_cfgDelIp(NETAPI_T h, int iface_no, nwal_IpType ipType,\r
- nwalIpAddr_t * ip_addr,\r
- nwalIpOpt_t * ip_qualifiers,\r
- NETCP_CFG_IP_T ip_rule_id,\r
- int *err)\r
-{\r
- netcp_cfgDelIpInternal( h, iface_no, ipType,\r
- ip_addr, ip_qualifiers, ip_rule_id,\r
- err, NULL, 1);\r
- return;\r
-}\r
-\r
-\r
-/**\r
- * @def netcp_cfgAddClass\r
- * @brief add a classifier rule into NETCP\r
- **/\r
-NETCP_CFG_CLASS_T netcp_cfgAddClass(NETAPI_T h,\r
- NETCP_CFG_CLASSIFIER_T *p_class,\r
- NETCP_CFG_ROUTE_HANDLE_T route,\r
- int action, int * err)\r
-{\r
-NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;\r
-void * l3_handle=NULL; //ip handle\r
-nwal_RetValue retValue;\r
-NetapiNwalTransInfo_t *pTransInfo;\r
-nwal_TransID_t trans_id;\r
-int class_slot=-1;\r
-int iface_no;\r
-int ip_slot=-1;\r
-NETCP_CFG_CLASS_T classHandle; //returned by us\r
-nwal_appProtoType_t proto;\r
-nwalLocConnCfg_t tempCfg={\r
-0, //nwal_handle: to be filled in\r
-{0}, // l4 ports: to be filled in\r
-0, //core id (NA)\r
-0, //action\r
-CPPI_PARAM_NOT_SPECIFIED, //flow id\r
-QMSS_PARAM_NOT_SPECIFIED, //dest queue\r
-};\r
-\r
-if(!p_class) { *err=NETAPI_ERR_BAD_INPUT; return -1;}\r
-switch(p_class->classType)\r
-{\r
-default:\r
- printf(">netcp_cfg : classifier type %d not supported\n",p_class->classType);\r
- break;\r
-case(NETCP_CFG_CLASS_TYPE_L3_L4):\r
-case(NETCP_CFG_CLASS_TYPE_L4):\r
- //assume just type l4 only (L2, L3 defined by iface, l3 id )\r
- iface_no = p_class->u.c_l4.iface;\r
- if (p_class->classType== NETCP_CFG_CLASS_TYPE_L4)\r
- { \r
- ip_slot = (p_class->u.c_l4.ip>>8)&0xff;\r
- }\r
-\r
- //verify that iface has been configured \r
- if (iface_no != NETCP_CFG_NO_INTERFACE)\r
- {\r
- if(!netapi_get_global()->nwal_context.interfaces[iface_no].in_use)\r
- {\r
- *err = NETAPI_ERR_BAD_INPUT;\r
- return -1;\r
- }\r
- }\r
-\r
- if (p_class->classType== NETCP_CFG_CLASS_TYPE_L4)\r
- {\r
- //verify that ip has been configured and get its handle\r
- l3_handle = netcp_cfgp_get_ip_handle(\r
- &netapi_get_global()->nwal_context, iface_no,\r
- ip_slot );\r
- }\r
- else\r
- {\r
- nwalIpParam_t tempParam={\r
- pa_IPV4, /* IP Type */\r
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* Dest IP */\r
- { 0x0,0,0,0},/* IP Options */\r
- NWAL_MATCH_ACTION_CONTINUE_NEXT_ROUTE, /* Continue parsing to next route for match */\r
- NWAL_NEXT_ROUTE_FAIL_ACTION_HOST, /* For next route fail action by default is route to host */\r
- CPPI_PARAM_NOT_SPECIFIED, /* Use default flow configured to NWAL if packet is routed to host */\r
- QMSS_PARAM_NOT_SPECIFIED /* Use default queue configured to NWAL if packet is routed to host */\r
- };\r
- //build nwalIpParam\r
- memcpy(&tempParam.locIpAddr,p_class->u.c_l3_l4.ip_addr, sizeof(nwalIpAddr_t));\r
- tempParam.ipType=p_class->u.c_l3_l4.ipType;\r
- //use nwal defauls for route\r
- if (p_class->u.c_l3_l4.ip_qualifiers)\r
- memcpy(&tempParam.ipOpt,p_class->u.c_l3_l4.ip_qualifiers, sizeof(nwalIpOpt_t));\r
- else\r
- memset(&tempParam.ipOpt,0, sizeof(nwalIpOpt_t));\r
-\r
-\r
- //find if we have a matching L3 handle for IP classifier; if not create it\r
- retValue = nwal_getIPAddr (((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,\r
- &tempParam,\r
- netcp_cfgp_get_mac_handle(&netapi_get_global()->nwal_context, iface_no ),\r
- &l3_handle); \r
- if (retValue != nwal_TRUE) \r
- {\r
- int ret;\r
- //**NEW IP RULE \r
- //need to attach this IP RULE to the MAC\r
- l3_handle= (void *) netcp_cfgAddIpInternal(\r
- h, iface_no, \r
- p_class->u.c_l3_l4.ipType,\r
- p_class->u.c_l3_l4.ip_addr,\r
- p_class->u.c_l3_l4.ip_qualifiers,\r
- p_class->u.c_l3_l4.p_fail_route,\r
- &ret,\r
- FALSE);\r
- if(!ret)\r
- {\r
- l3_handle=NULL;\r
- }\r
- }\r
- } \r
- if(!l3_handle)\r
- {*err = NETAPI_ERR_BAD_INPUT; return -1 ;}\r
-\r
-\r
- //find free slot for CLASS & reserve\r
- class_slot= netcp_cfgp_find_class_slot(&netapi_get_global()->nwal_context);\r
- if(class_slot<0) {*err = NETAPI_ERR_NOMEM; return -1;}\r
- classHandle = NETAPI_NETCP_MATCH_CLASS | (class_slot<<8) | (iface_no&0xff);\r
- //build request from template\r
- tempCfg.inHandle=l3_handle;\r
- if (p_class->classType== NETCP_CFG_CLASS_TYPE_L4)\r
- { \r
- memcpy(&tempCfg.appProto,&p_class->u.c_l4.appProto,sizeof(nwalAppProto_t));\r
- proto = p_class->u.c_l4.proto;\r
- }\r
- else\r
- {\r
- memcpy(&tempCfg.appProto,&p_class->u.c_l3_l4.appProto,sizeof(nwalAppProto_t));\r
- proto = p_class->u.c_l3_l4.proto;\r
- }\r
- \r
- tempCfg.matchAction = (action==NETCP_CFG_ACTION_TO_SW) ? NWAL_MATCH_ACTION_HOST : NWAL_MATCH_ACTION_DISCARD;\r
- if (route)\r
- {\r
- netcp_cfgp_build_route(route,&tempCfg.appRxPktFlowId, &tempCfg.appRxPktQueue);\r
- }\r
-\r
- //get a transaction id\r
- pTransInfo = netapip_GetFreeTransInfo((NETAPI_GLOBAL_T *) n->global, &trans_id);\r
- if (!pTransInfo) { *err = NETAPI_ERR_BUSY; return -1 ;}\r
- pTransInfo->transType = NETAPI_NWAL_HANDLE_TRANS_IP; /* todo: fix this to TRANS_L4*/\r
- pTransInfo->netapi_handle = h;\r
- //issue request\r
- retValue = nwal_addConn(((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,\r
- trans_id,\r
- (nwal_AppId) classHandle,\r
- proto,\r
- &tempCfg,\r
- NULL,\r
- &pTransInfo->handle);\r
- if(retValue != nwal_OK)\r
- {\r
- *err = NETAPI_ERR_NWAL_ERR0;\r
- printf (">netcp cfg - ERROR: nwal_addConn returned Error Code %d\n",\r
- retValue);\r
- pTransInfo->inUse = nwal_FALSE;\r
- netcp_cfgp_delete_class(&netapi_get_global()->nwal_context, class_slot);\r
- return -1;\r
- }\r
- //wait here until its done since scheduler isn't running yet most likely..\r
- // todo: make this handled by scheduler poll later ??\r
- if(trans_id != NWAL_TRANSID_SPIN_WAIT)\r
- {\r
- n->nwal_local.numPendingCfg++;\r
- while ((volatile) n->nwal_local.numPendingCfg)\r
- {\r
-           // if the response is there, this poll lands in the Ctl poll callback,\r
-           // which handles the rest (including decrementing #pending)\r
- nwal_pollCtl(((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,NULL,NULL);\r
- }\r
- }\r
- printf (">netcp cfg: L4 Classifier added to interface %d ip %d (slot%d)\n", iface_no, ip_slot, class_slot);\r
- netcp_cfgp_insert_class(&netapi_get_global()->nwal_context, \r
- class_slot,\r
- p_class->classType, \r
- NULL, //L2 we have\r
- (p_class->classType== NETCP_CFG_CLASS_TYPE_L3_L4? l3_handle : NULL),\r
- pTransInfo->handle);\r
- pTransInfo->state = NETAPI_NWAL_HANDLE_STATE_IDLE;\r
- return classHandle;\r
-} //end switch\r
-return -1;\r
-}\r
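For reference, a small sketch of the classifier-handle encoding used above: netcp_cfgAddClass packs the class slot and interface number into the returned handle, and netcp_cfgDelClass recovers the slot with the same shift and mask (helper names are illustrative).

    #include <stdint.h>

    static inline int example_class_slot(uint32_t classHandle)
    {
        return (classHandle >> 8) & 0xffff;   /* same shift/mask as netcp_cfgDelClass   */
    }

    static inline int example_class_iface(uint32_t classHandle)
    {
        return classHandle & 0xff;            /* interface number, per the encode above */
    }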
-\r
-//delete classifier\r
-void netcp_cfgDelClass(NETAPI_T h,\r
- NETCP_CFG_CLASS_T classId,\r
- int *err)\r
-{\r
-NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;\r
-void * L4_handle; //class handle -> L4\r
-void * L3_handle; //class handle -> L3\r
-nwal_RetValue retValue;\r
-NetapiNwalTransInfo_t *pTransInfo;\r
-nwal_TransID_t trans_id;\r
-int class_slot=-1;\r
-//int iface;\r
-//int ip_slot;\r
-\r
- class_slot = (classId>>8)&0xffff;\r
- L4_handle=netcp_cfgp_get_l4_handle(\r
- &netapi_get_global()->nwal_context,\r
- class_slot );\r
- if(!L4_handle) {*err = NETAPI_ERR_BAD_INPUT; return ;}\r
- L3_handle = netcp_cfgp_get_l3_handle(\r
- &netapi_get_global()->nwal_context,\r
- class_slot );\r
- /* l3 handle might be NULL,, depending on type of classifier */\r
-\r
- netcp_cfgp_delete_class(\r
- &netapi_get_global()->nwal_context,\r
- class_slot );\r
- //get a transaction id\r
- pTransInfo = netapip_GetFreeTransInfo((NETAPI_GLOBAL_T *) n->global, &trans_id);\r
- if (!pTransInfo) { *err = NETAPI_ERR_BUSY; return ;}\r
- pTransInfo->transType = NETAPI_NWAL_HANDLE_TRANS_IP;\r
- pTransInfo->netapi_handle = h;\r
- //issue request for L4\r
- retValue = nwal_delConn(((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,\r
- trans_id,\r
- L4_handle);\r
- if(retValue != nwal_OK)\r
- {\r
- *err = NETAPI_ERR_NWAL_ERR0;\r
- printf (">netcp cfg - ERROR: nwal_delConn returned Error Code %d\n",\r
- retValue);\r
- pTransInfo->inUse = nwal_FALSE;\r
- return ; /* todo: what about the L3? */\r
- }\r
- //wait here until its done since scheduler isn't running yet most likely..\r
- // todo: make this handled by scheduler poll later ??\r
- if(trans_id != NWAL_TRANSID_SPIN_WAIT)\r
- {\r
- n->nwal_local.numPendingCfg++;\r
- while ((volatile) n->nwal_local.numPendingCfg)\r
- {\r
-           // if the response is there, this poll lands in the Ctl poll callback,\r
-           // which handles the rest (including decrementing #pending)\r
- nwal_pollCtl(((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,NULL,NULL);\r
- }\r
- }\r
- printf (">netcp cfg: Classifier deleted\n");\r
- pTransInfo->state = NETAPI_NWAL_HANDLE_STATE_IDLE;\r
- pTransInfo->inUse = nwal_FALSE;\r
-\r
- /* delete L3 if we have to */\r
- if (L3_handle)\r
- {\r
- netcp_cfgDelIpInternal( h, 0, 0,\r
- NULL, NULL, 0,\r
- err, L3_handle, 0);\r
- }\r
- return ;\r
-}\r
-\r
-\r
-/*--------------flow management--------*/\r
-// ADD A Flow\r
-NETCP_CFG_FLOW_HANDLE_T netcp_cfgAddFlow(NETAPI_T h,\r
- int n,\r
- Pktlib_HeapHandle handles[],\r
- int sizes[],\r
- int byte_offset,\r
- int * err )\r
-{\r
- Cppi_RxFlowCfg rxFlowCfg;\r
- Uint8 isAlloc;\r
- Qmss_QueueHnd rxBufQ[TUNE_NETAPI_MAX_BUF_POOLS_IN_FLOW];\r
- Uint32 rxBufSize[TUNE_NETAPI_MAX_BUF_POOLS_IN_FLOW];\r
- int i;\r
- Cppi_FlowHnd FlowHnd;\r
- int slot;\r
- NETCP_CFG_FLOW_HANDLE_T retVal;\r
-\r
- *err= 0; /* ok */\r
- //get a slot to save new flow\r
- slot = netcp_cfgp_find_flow_slot(&netapi_get_global()->nwal_context);\r
- if (slot<0) { *err= NETAPI_ERR_NOMEM; return NULL; }\r
-\r
- //configure flow\r
- memset(&rxFlowCfg,0,sizeof(Cppi_RxFlowCfg));\r
- for (i = 0; i < TUNE_NETAPI_MAX_BUF_POOLS_IN_FLOW; i++)\r
- {\r
- if (i >= n)\r
- {\r
- rxBufQ[i] = 0;\r
- rxBufSize[i] = 0;\r
- } else\r
- {\r
- rxBufQ[i] = Pktlib_getInternalHeapQueue(handles[i]);\r
- //todo: verity sizes< heapsize\r
- //todo: verify order\r
- rxBufSize[i]= sizes[i];\r
- }\r
- if (i && (rxBufQ[i] <= 0))\r
- {\r
- rxBufQ[i] = rxBufQ[i-1];\r
- rxBufSize[i] = 0;\r
- }\r
- }\r
- /* Configure Rx flow */\r
- rxFlowCfg.flowIdNum = CPPI_PARAM_NOT_SPECIFIED;\r
- rxFlowCfg.rx_dest_qnum = 100; //DANGEROUS> TODO PUT VALID Q HERE\r
- rxFlowCfg.rx_dest_qmgr = 0;\r
- rxFlowCfg.rx_sop_offset = byte_offset;\r
- rxFlowCfg.rx_ps_location = Cppi_PSLoc_PS_IN_DESC;\r
- rxFlowCfg.rx_desc_type = Cppi_DescType_HOST;\r
- rxFlowCfg.rx_error_handling = 0;\r
-\r
- rxFlowCfg.rx_psinfo_present = 1;\r
- rxFlowCfg.rx_einfo_present = 1;\r
-\r
- rxFlowCfg.rx_dest_tag_lo = 0;\r
- rxFlowCfg.rx_dest_tag_hi = 0;\r
- rxFlowCfg.rx_src_tag_lo = 0;\r
- rxFlowCfg.rx_src_tag_hi = 0;\r
-\r
- rxFlowCfg.rx_size_thresh0_en = rxBufSize[1] ? 1 : 0;\r
- rxFlowCfg.rx_size_thresh1_en = rxBufSize[2] ? 1 : 0;\r
- rxFlowCfg.rx_size_thresh2_en = rxBufSize[3] ? 1 : 0;\r
-\r
- rxFlowCfg.rx_dest_tag_lo_sel = 0;\r
- rxFlowCfg.rx_dest_tag_hi_sel = 0;\r
- rxFlowCfg.rx_src_tag_lo_sel = 0;\r
- rxFlowCfg.rx_src_tag_hi_sel = 0;\r
-\r
- rxFlowCfg.rx_fdq1_qnum = rxBufQ[1];\r
- rxFlowCfg.rx_fdq1_qmgr = 0;\r
- rxFlowCfg.rx_fdq2_qnum = rxBufQ[2];\r
-\r
- rxFlowCfg.rx_fdq2_qmgr = 0;\r
- rxFlowCfg.rx_fdq3_qnum = rxBufQ[3];\r
-\r
- rxFlowCfg.rx_fdq3_qmgr = 0;\r
-\r
- rxFlowCfg.rx_size_thresh0 = rxBufSize[1] ? rxBufSize[0] : 0;\r
- rxFlowCfg.rx_size_thresh1 = rxBufSize[2] ? rxBufSize[1] : 0;\r
- rxFlowCfg.rx_size_thresh2 = rxBufSize[3] ? rxBufSize[2] : 0;\r
-\r
- rxFlowCfg.rx_fdq0_sz0_qnum = rxBufQ[0];\r
- rxFlowCfg.rx_fdq0_sz0_qmgr = 0;\r
- rxFlowCfg.rx_fdq0_sz1_qnum = rxBufQ[1];\r
- rxFlowCfg.rx_fdq0_sz1_qmgr = 0;\r
- rxFlowCfg.rx_fdq0_sz2_qnum = rxBufQ[2];\r
- rxFlowCfg.rx_fdq0_sz2_qmgr = 0;\r
- rxFlowCfg.rx_fdq0_sz3_qnum = rxBufQ[3];\r
- rxFlowCfg.rx_fdq0_sz3_qmgr = 0;\r
-\r
- {\r
- //todo: replace this with a nwal call to get global cntx info\r
- Cppi_CpDmaInitCfg cpdmaCfg;\r
- memset(&cpdmaCfg,0,sizeof(Cppi_CpDmaInitCfg));\r
- cpdmaCfg.dmaNum = Cppi_CpDma_PASS_CPDMA;\r
- FlowHnd =\r
- Cppi_configureRxFlow (Cppi_open (&cpdmaCfg), &rxFlowCfg, &isAlloc);\r
-}\r
- if (FlowHnd == NULL)\r
- {\r
- *err= NETAPI_ERR_NORES;\r
- netcp_cfgp_delete_flow(&netapi_get_global()->nwal_context, slot);\r
- return (NULL);\r
- }\r
-\r
- //update slot\r
- retVal = netcp_cfgp_insert_flow(&netapi_get_global()->nwal_context, slot, (void*) FlowHnd);\r
- printf(">netcp cfg: flow %d created\n", ((NETCP_CFG_FLOW_T *) retVal)->flowid);\r
- return ( retVal);\r
-\r
-\r
-\r
-}\r
-\r
-//Delete a flow\r
-void netcp_cfgDelFlow(NETAPI_T h , NETCP_CFG_FLOW_HANDLE_T f , int * err)\r
-{\r
- int slot;\r
- void * handle;\r
- *err=0;\r
- /* find entry */\r
- slot = netcp_cfgp_find_flow(&netapi_get_global()->nwal_context, ((NETCP_CFG_FLOW_T *) f) ->flowid, &handle);\r
- if (slot<0) {*err = NETAPI_ERR_BAD_INPUT; return;}\r
-\r
- Cppi_closeRxFlow( (Cppi_FlowHnd) handle);\r
- netcp_cfgp_delete_flow(&netapi_get_global()->nwal_context, slot);\r
- printf(">netcp cfg: flow %d deleted\n", ((NETCP_CFG_FLOW_T *) f)->flowid);\r
- return;\r
-}\r
-\r
-\r
-/*************************************************************************/\r
-/*********************************INTERNAL*******************************/\r
-/************************************************************************/\r
-\r
-/***************************************************************\r
- ********************METCP CMD Reply Callback******************\r
- ***************************************************************/\r
-void netapi_NWALCmdCallBack (nwal_AppId appHandle,\r
- uint16_t trans_id,\r
- nwal_RetValue ret)\r
-{\r
- NetapiNwalTransInfo_t * p_trans;\r
- NETAPI_NWAL_LOCAL_CONTEXT_T *p_local=NULL;\r
-\r
- if(trans_id == NWAL_TRANSID_SPIN_WAIT)\r
- {\r
- netapi_get_global()->nwal_context.numBogusTransIds++;\r
- return;\r
- }\r
-\r
- p_trans= &netapi_get_global()->nwal_context.transInfos[trans_id];\r
- p_local =&((NETAPI_HANDLE_T*) (p_trans->netapi_handle))->nwal_local;\r
-\r
- if(ret != nwal_OK)\r
- {\r
- printf (">netcp cfg : NWALCmdCallBack returned Error Code %d\n",\r
- ret);\r
- //todo: atomic inc\r
- netapi_get_global()->nwal_context.numCmdFail++;\r
- }\r
- else\r
- {\r
- //todo: atomic inc\r
- netapi_get_global()->nwal_context.numCmdPass++;\r
- switch(p_trans->transType)\r
- {\r
- case NETAPI_NWAL_HANDLE_TRANS_MAC:\r
- {\r
- if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_OPEN_PENDING)\r
- {\r
- p_trans->state =NETAPI_NWAL_HANDLE_STATE_OPEN;\r
- }\r
- else if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_CLOSE_PENDING)\r
- {\r
- p_trans->state =NETAPI_NWAL_HANDLE_STATE_IDLE;\r
- }\r
- break;\r
- }\r
- case NETAPI_NWAL_HANDLE_TRANS_IP:\r
- {\r
- if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_OPEN_PENDING)\r
- {\r
- p_trans->state =NETAPI_NWAL_HANDLE_STATE_OPEN;\r
- }\r
- else if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_CLOSE_PENDING)\r
- {\r
- p_trans->state =NETAPI_NWAL_HANDLE_STATE_IDLE;\r
- }\r
- break;\r
- }\r
- case NETAPI_NWAL_HANDLE_TRANS_PORT:\r
- {\r
- if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_OPEN_PENDING)\r
- {\r
- p_trans->state =NETAPI_NWAL_HANDLE_STATE_OPEN;\r
- }\r
- else if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_CLOSE_PENDING)\r
- {\r
- p_trans->state =NETAPI_NWAL_HANDLE_STATE_IDLE;\r
- }\r
- break;\r
- }\r
- case NETAPI_NWAL_HANDLE_TRANS_SA:\r
- {\r
- if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_OPEN_PENDING)\r
- {\r
- p_trans->state =NETAPI_NWAL_HANDLE_STATE_OPEN;\r
- }\r
- else if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_CLOSE_PENDING)\r
- {\r
- p_trans->state =NETAPI_NWAL_HANDLE_STATE_IDLE;\r
- }\r
- break;\r
- }\r
- case NETAPI_NWAL_HANDLE_TRANS_SA_POLICY:\r
- {\r
- if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_OPEN_PENDING)\r
- {\r
- p_trans->state =NETAPI_NWAL_HANDLE_STATE_OPEN;\r
- }\r
- else if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_CLOSE_PENDING)\r
- {\r
- p_trans->state =NETAPI_NWAL_HANDLE_STATE_IDLE;\r
- }\r
- break;\r
- }\r
- default:\r
- {\r
- printf ("netcp cfg> Invalid transaction type %d for trans_id: %d\n",\r
- p_trans->transType,trans_id);\r
- break;\r
- }\r
- }\r
- }\r
-\r
- p_local->numPendingCfg--;\r
-\r
- if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_IDLE)\r
- {\r
- p_trans->inUse = nwal_FALSE;\r
- }\r
-\r
-}\r
-\r
-\r
-/*******************************************************/\r
-/**************stats reply callback**********************/\r
-/*******************************************************/\r
-void netapi_NWALCmdPaStatsReply (nwal_AppId appHandle,\r
- nwal_TransID_t trans_id,\r
- paSysStats_t *stats)\r
-{\r
- NetapiNwalTransInfo_t * p_trans;\r
- NETAPI_NWAL_LOCAL_CONTEXT_T *p_local=NULL;\r
- \r
- if(trans_id == NWAL_TRANSID_SPIN_WAIT)\r
- {\r
- netapi_get_global()->nwal_context.numBogusTransIds++;\r
- return;\r
- }\r
-\r
- p_trans= &netapi_get_global()->nwal_context.transInfos[trans_id];\r
- p_trans->inUse = nwal_FALSE;\r
- p_local =&((NETAPI_HANDLE_T*) (p_trans->netapi_handle))->nwal_local;\r
-\r
- //save a local copy of some stuff*/\r
- p_local->numL2PktsRecvd=stats->classify1.nPackets;\r
- p_local->numL3PktsRecvd=stats->classify1.nIpv4Packets;\r
-#if 0\r
- p_local->numL4PktsRecvd=stats->;\r
- p_local->numL4PktsSent=stats->;\r
- p_local->TxErrDrop=stats->;\r
-#endif\r
- //callout result to application !!\r
- if (p_local->stats_cb) (*p_local->stats_cb)(p_trans->netapi_handle,stats);\r
- \r
-} \r
-\r
+/**********************************************************
+ * file: netcp_cfg.c
+ * purpose: netcp configuration routines
+ **************************************************************
+ * FILE: netcp_cfg.c
+ *
+ * DESCRIPTION: netcp configuration main source file for user space transport
+ * library
+ *
+ * REVISION HISTORY: rev 0.0.1
+ *
+ * Copyright (c) Texas Instruments Incorporated 2010-2011
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ******************************************************/
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include "netapi.h"
+#include "netcp_cfg.h"
+#include "netapi_loc.h"
+
+/******************************************************************
+ ********************Utility*************************************
+*******************************************************************/
+
+
+//get a free transaction id
+NetapiNwalTransInfo_t * netapip_GetFreeTransInfo(NETAPI_GLOBAL_T *p_global, nwal_TransID_t *pTransId)
+{
+ uint16_t count=0;
+
+ count=0;
+ while(count < TUNE_NETAPI_MAX_NUM_TRANS)
+ {
+ if((p_global->nwal_context.transInfos[count].inUse) != nwal_TRUE)
+ {
+ p_global->nwal_context.transInfos[count].inUse = nwal_TRUE;
+ *pTransId = count;
+ return(&p_global->nwal_context.transInfos[count]);
+ }
+ count++;
+ }
+
+    /* trouble: need to wait for one to free up */
+    /* todo: handle this by forcing a poll of the control queue */
+    printf(">netcp_cfg: trying to get a free transaction slot but all are in use!!\n");
+ return NULL;
+
+}
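+
+/*
+ * Usage sketch of the transaction-slot pattern followed by the config
+ * routines below (h is assumed to be an already-created NETAPI_T handle;
+ * error handling is abbreviated):
+ *
+ *   NETAPI_HANDLE_T *n = (NETAPI_HANDLE_T *) h;
+ *   nwal_TransID_t trans_id;
+ *   NetapiNwalTransInfo_t *pTransInfo =
+ *       netapip_GetFreeTransInfo((NETAPI_GLOBAL_T *) n->global, &trans_id);
+ *   if (!pTransInfo) return;                    // all slots in use
+ *   pTransInfo->transType     = NETAPI_NWAL_HANDLE_TRANS_MAC;
+ *   pTransInfo->netapi_handle = h;
+ *   // ... issue the nwal request and poll until it completes ...
+ *   pTransInfo->state = NETAPI_NWAL_HANDLE_STATE_IDLE;
+ *   pTransInfo->inUse = nwal_FALSE;             // release the slot
+ */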
+//internal: build route
+void netcp_cfgp_build_route(NETCP_CFG_ROUTE_T * p_route, int16_t * p_flow, Qmss_QueueHnd * p_q)
+{
+ if (!p_route) return;
+ if (p_route->p_flow) *p_flow= p_route->p_flow->flowid;
+ else *p_flow = CPPI_PARAM_NOT_SPECIFIED;
+ if (p_route->p_dest_q) *p_q = pktio_get_q(p_route->p_dest_q);
+ else *p_q=QMSS_PARAM_NOT_SPECIFIED;
+}
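+
+/*
+ * Sketch of how a route object feeds this helper (the p_flow / p_dest_q
+ * field names are taken from the dereferences above; "flow_handle" and
+ * "rx_chan" are illustrative handles assumed to come from netcp_cfgAddFlow()
+ * and pktio channel creation respectively):
+ *
+ *   NETCP_CFG_ROUTE_T route;
+ *   route.p_flow   = (NETCP_CFG_FLOW_T *) flow_handle;
+ *   route.p_dest_q = rx_chan;          // queue extracted via pktio_get_q()
+ *   int16_t flowid;
+ *   Qmss_QueueHnd q;
+ *   netcp_cfgp_build_route(&route, &flowid, &q);
+ */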
+/*-----------------------------------------------------------*/
+/*----------------database management stuff-------------------*/
+/*-----------------------------------------------------------*/
+
+/*=====================Policies=============================*/
+//internal: find a free slot for a policy
+int netcp_cfgp_find_policy_slot( NETAPI_NWAL_GLOBAL_CONTEXT_T *p, int tunnel)
+{
+ int i;
+ if ((tunnel <0 ) || (tunnel >=TUNE_NETAPI_MAX_SA)) return -1;
+
+ //find a free entry
+ for(i=0;i<TUNE_NETAPI_MAX_POLICY;i++)
+ {
+ if (!p->policy[i].in_use)
+ {
+ p->policy[i].in_use = 2; //pending
+ p->policy[i].tunnel= tunnel; //save tunnel this is linked to
+ return i;
+ }
+ }
+ return -1;
+}
+
+//internal: delete a policy from list
+void netcp_cfgp_delete_policy(
+ NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int policy_slot )
+{
+ if ((policy_slot <0 ) || (policy_slot >= TUNE_NETAPI_MAX_POLICY))
+ {
+ return ;
+ }
+ p->policy[policy_slot].in_use=0;
+ return;
+}
+
+//internal: insert a policy into the list
+void netcp_cfgp_insert_policy(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int policy_slot, //we 'reserved it already'
+ void * handle)
+{
+ p->policy[policy_slot].in_use=1;
+ p->policy[policy_slot].nwal_handle = handle;
+ return;
+}
+
+//internal: return nwal_handle for policy
+void *netcp_cfgp_get_policy( NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int policy_slot)
+{
+ if ((policy_slot <0 ) || (policy_slot >= TUNE_NETAPI_MAX_POLICY)) return NULL;
+ if (!p->policy[policy_slot].in_use) return NULL;
+ return p->policy[policy_slot].nwal_handle;
+}
+
+
+
+/*======================SAs==================================*/
+//internal: find a free slot for an SA
+int netcp_cfgp_find_sa_slot( NETAPI_NWAL_GLOBAL_CONTEXT_T *p, int iface)
+{
+ int i;
+ if (iface != NETCP_CFG_NO_INTERFACE)
+ {
+ if ((iface <0 ) || (iface >=TUNE_NETAPI_MAX_INTERFACES)) return -1;
+ }
+ //find a free entry
+ for(i=0;i<TUNE_NETAPI_MAX_SA;i++)
+ {
+ if (!p->tunnel[i].in_use)
+ {
+ p->tunnel[i].in_use = 2; //pending
+ p->tunnel[i].iface= iface; //save iface
+ return i;
+ }
+ }
+ return -1;
+}
+ //internal: delete an SA from the list
+void netcp_cfgp_delete_sa(
+ NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int sa_slot )
+{
+ if ((sa_slot <0 ) || (sa_slot >= TUNE_NETAPI_MAX_SA))
+ {
+ return ;
+ }
+ p->tunnel[sa_slot].in_use=0;
+ return;
+}
+
+//internal: insert an SA into the list
+void netcp_cfgp_insert_sa(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int sa_slot, //we 'reserved it already'
+ int dir,
+ int mode,
+ void * temp1,
+ void * temp2,
+ void * handle_inflow,
+ void * handle_sideband)
+{
+ p->tunnel[sa_slot].in_use=1;
+ p->tunnel[sa_slot].inbound = dir;
+ p->tunnel[sa_slot].sa_mode = mode;
+ p->tunnel[sa_slot].sa_handle_inflow = handle_inflow;
+ p->tunnel[sa_slot].sa_handle_sideband = handle_sideband;
+ return;
+}
+
+//internal: return nwal_handles for SA
+void *netcp_cfgp_get_sa_handles( NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int sa_slot, void ** p_sideband)
+{
+ if ((sa_slot <0 ) || (sa_slot >= TUNE_NETAPI_MAX_SA)) return NULL;
+ if (!p->tunnel[sa_slot].in_use) return NULL;
+ *p_sideband = p->tunnel[sa_slot].sa_handle_sideband;
+ return p->tunnel[sa_slot].sa_handle_inflow;
+}
+
+
+/*==============================FLOWS=============================*/
+//internal: find a free slot for a flow
+static int netcp_cfgp_find_flow_slot( NETAPI_NWAL_GLOBAL_CONTEXT_T *p)
+{
+ int i;
+ //find a free entry
+ for(i=0;i<TUNE_NETAPI_MAX_FLOWS;i++)
+ {
+ if (!p->flows[i].in_use)
+ {
+ p->flows[i].in_use = 2; //pending
+ return i;
+ }
+ }
+ return -1;
+}
+
+//internal: clear flow slot
+static void netcp_cfgp_delete_flow(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,int slot)
+{
+ if ((slot >=0 ) && (slot < TUNE_NETAPI_MAX_FLOWS))
+ {
+ p->flows[slot].in_use = 0;
+ }
+}
+
+//internal: insert a flow into flow slot
+static NETCP_CFG_FLOW_HANDLE_T netcp_cfgp_insert_flow(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int slot, //we 'reserved it already'
+ void * handle) //cppi flow handle. Save this for delete
+{
+ p->flows[slot].in_use=1;
+ p->flows[slot].handle = handle;
+ p->flows[slot].flow.flowid = Cppi_getFlowId(handle);
+ return (NETCP_CFG_FLOW_HANDLE_T) &p->flows[slot].flow;
+}
+
+//find entry matching the flowid. return slot# and the cppi handle
+static int netcp_cfgp_find_flow(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int flowid,
+ void ** handle)
+{
+int i;
+ *handle=NULL;
+ for(i=0;i<TUNE_NETAPI_MAX_FLOWS;i++)
+ {
+ if ((p->flows[i].in_use)&&(p->flows[i].flow.flowid == flowid))
+ {
+ *handle = p->flows[i].handle;
+ return i;
+ }
+ }
+ return -1;
+}
+
+
+/*============================IP ADDRESSES==========================*/
+
+//internal: find a free slot for IP rule
+static int netcp_cfgp_find_ip_slot(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int iface_no)
+{
+ int i;
+
+ //find a free entry
+ for(i=0;i<TUNE_NETAPI_MAX_IP;i++)
+ {
+ if (!p->ips[i].in_use)
+ {
+ p->ips[i].in_use = 2; //pending
+ return i;
+ }
+ }
+ return -1;
+}
+
+
+//internal: insert an IP address into iface
+static void netcp_cfgp_insert_ip(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ nwal_IpType ipType,
+ nwalIpAddr_t *ip_addr,
+ nwalIpOpt_t *ip_qualifiers,
+ int iface_no,
+ int ip_slot, //we 'reserved it already'
+ void * handle)
+{
+ p->ips[ip_slot].in_use=1;
+ memcpy(&p->ips[ip_slot].ip_addr, ip_addr, sizeof(nwalIpAddr_t));
+ if(ip_qualifiers)
+ memcpy(&p->ips[ip_slot].ip_qualifiers, ip_qualifiers, sizeof(nwalIpOpt_t));
+ else
+ memset(&p->ips[ip_slot].ip_qualifiers, 0, sizeof(nwalIpOpt_t));
+ p->ips[ip_slot].ip_type = ipType;
+ p->ips[ip_slot].nwal_handle = handle;
+ return;
+}
+
+
+//internal: free IP slot associated with ip address
+static void netcp_cfgp_delete_ip(
+ NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int iface_no,
+ int ip_slot )
+{
+    if ((ip_slot <0)||(ip_slot>=TUNE_NETAPI_MAX_IP)) return ;
+ p->ips[ip_slot].in_use=0;
+ return;
+}
+
+
+//internal: get IP handle associated with ip address
+static void *netcp_cfgp_get_ip_handle(
+ NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int iface_no,
+ int ip_slot )
+{
+ if ((ip_slot <0)||(ip_slot>=TUNE_NETAPI_MAX_IP)) return NULL;
+ if (!p->ips[ip_slot].in_use) return NULL;
+ return (void *) p->ips[ip_slot].nwal_handle;
+}
+
+/*==========================MAC INTERFACES======================*/
+//internal: insert interface info into global context
+static void netcp_cfgp_insert_mac(NETAPI_NWAL_GLOBAL_CONTEXT_T *p, unsigned char * p_mac,
+ int iface_no, int state, NETCP_CFG_VLAN_T vlan, void * handle)
+{
+ if ((iface_no >=0 ) && (iface_no < TUNE_NETAPI_MAX_INTERFACES))
+ {
+ memset(&p->interfaces[iface_no],0,sizeof(NETCP_INTERFACE_T));
+ p->interfaces[iface_no].in_use = 1;
+ memcpy(&p->interfaces[iface_no].mac[0], p_mac,6);
+ p->interfaces[iface_no].state = state;
+ //todo p->interfaces[iface_no].vlan = vlan;
+        p->interfaces[iface_no].nwal_handle = handle; //save handle associated with this rule
+ }
+ else printf(">netcp_cfg insert interface # out of range %d\n",iface_no);
+
+}
+
+//internal: get handle associated with interface
+void* netcp_cfgp_get_mac_handle(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,int iface_no)
+{
+ if (iface_no == NETCP_CFG_NO_INTERFACE) return NULL;
+ if ((iface_no <0 ) || (iface_no >= TUNE_NETAPI_MAX_INTERFACES))
+ {
+ return NULL;
+ }
+ else if ( p->interfaces[iface_no].in_use)
+ {
+ return (void *) p->interfaces[iface_no].nwal_handle;
+ }
+ //no valid entry in slot
+ return NULL;
+}
+//internal: clear interface entry
+static void netcp_cfgp_delete_mac(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,int iface_no)
+{
+ if ((iface_no >=0 ) && (iface_no < TUNE_NETAPI_MAX_INTERFACES))
+ {
+ p->interfaces[iface_no].in_use = 0;
+ }
+}
+
+
+/*========================CLASSIFIERS==========================*/
+//internal: find a free slot for classifier rule
+static int netcp_cfgp_find_class_slot( NETAPI_NWAL_GLOBAL_CONTEXT_T *p)
+{
+ int i;
+ //find a free entry
+ for(i=0;i<TUNE_NETAPI_MAX_CLASSIFIERS;i++)
+ {
+ if (!p->classi[i].in_use)
+ {
+ p->classi[i].in_use = 2; //pending
+ return i;
+ }
+ }
+ return -1;
+}
+
+ //internal: delete a classifier from the list
+static void netcp_cfgp_delete_class(
+ NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int class_slot )
+{
+ if ((class_slot <0 ) || (class_slot >= TUNE_NETAPI_MAX_CLASSIFIERS))
+ {
+ return ;
+ }
+ p->classi[class_slot].in_use=0;
+ return;
+}
+
+//internal: insert a classifier into list
+static void netcp_cfgp_insert_class(NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int class_slot, //we 'reserved it already'
+ int class_type,
+ void * L2_handle,
+ void * L3_handle,
+ void * L4_handle)
+{
+ p->classi[class_slot].in_use=1;
+ p->classi[class_slot].nwal_L2_handle = L2_handle;
+ p->classi[class_slot].nwal_L3_handle = L3_handle;
+ p->classi[class_slot].nwal_L4_handle = L4_handle;
+ p->classi[class_slot].class_type = class_type;
+ return;
+}
+
+//internal: return L4 nwal_handle for class
+static void *netcp_cfgp_get_l4_handle( NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int class_slot)
+{
+ if ((class_slot <0 ) || (class_slot >= TUNE_NETAPI_MAX_CLASSIFIERS)) return NULL;
+ if (!p->classi[class_slot].in_use) return NULL;
+ return p->classi[class_slot].nwal_L4_handle;
+}
+
+//internal: return L3 nwal_handle for class
+static void *netcp_cfgp_get_l3_handle( NETAPI_NWAL_GLOBAL_CONTEXT_T *p,
+ int class_slot)
+{
+ if ((class_slot <0 ) || (class_slot >= TUNE_NETAPI_MAX_CLASSIFIERS)) return NULL;
+ if (!p->classi[class_slot].in_use) return NULL;
+ return p->classi[class_slot].nwal_L3_handle;
+}
+
+
+/***********************************************************************************/
+/****************************************API****************************************/
+/***********************************************************************************/
+
+
+/*****************************************************************
+ * Query Stats
+ ****************************************************************/
+void netcp_cfgReqStats(NETAPI_T h, NETCP_CFG_STATS_CB cb, int doClear, int *err)
+{
+nwal_RetValue ret;
+NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;
+NetapiNwalTransInfo_t *pTransInfo;
+nwal_TransID_t transId;
+if ((!n) || (!cb)) {*err = NETAPI_ERR_BAD_INPUT; return ;}
+*err =0;
+
+
+pTransInfo = netapip_GetFreeTransInfo((NETAPI_GLOBAL_T *) n->global, &transId);
+if (!pTransInfo) { *err = NETAPI_ERR_BUSY; return ;}
+pTransInfo->transType = NETAPI_NWAL_HANDLE_STAT_REQUEST;
+pTransInfo->netapi_handle = h;
+n->nwal_local.stats_cb = cb;
+ret = nwal_getPAStats( ((NETAPI_GLOBAL_T *) n->global)->nwal_context.nwalInstHandle,
+ transId,
+ NULL,
+ doClear);
+if(ret != nwal_OK)
+{
+ pTransInfo->inUse = nwal_FALSE;
+ *err = NETAPI_ERR_BUSY; //no resources??
+ printf("> netcp_cfg reqStats failed, err=%d\n",ret);
+}
+
+}
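+
+/*
+ * Usage sketch: request PA statistics through a callback (h is an assumed
+ * NETAPI_T handle; the callback arguments mirror the stats_cb invocation in
+ * the PA stats reply handler at the bottom of this file):
+ *
+ *   static void my_stats_cb(NETAPI_T h, paSysStats_t *stats)
+ *   {
+ *       // e.g. inspect stats->classify1.nPackets
+ *   }
+ *   ...
+ *   int err;
+ *   netcp_cfgReqStats(h, my_stats_cb, 1, &err);   // doClear=1: clear after read
+ *   if (err) { ... }
+ */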
+/*****************************************************************
+ * CREATE A MAC INTERFACE
+ ****************************************************************/
+NETCP_CFG_MACIF_T netcp_cfgCreateMacInterface(
+ NETAPI_T h,
+ uint8_t *p_mac,
+ int iface_no,
+ int switch_port,
+ NETCP_CFG_ROUTE_HANDLE_T route,
+ NETCP_CFG_VLAN_T vlan, //future
+ int state, //0=down, 1=up //ignored
+ int * err
+ )
+{
+NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;
+nwalMacParam_t MacInfo= {
+ 0, /* validParams */
+ 0, /* ifNum */
+ 0, /* vlanId */
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, /* Local mac */
+ NWAL_MATCH_ACTION_CONTINUE_NEXT_ROUTE, /* Continue parsing to next route for match */
+ NWAL_NEXT_ROUTE_FAIL_ACTION_HOST, /* For next route fail action by default is route to host */
+ CPPI_PARAM_NOT_SPECIFIED, /* Use default flow configured to NWAL if packet is routed to host */
+ QMSS_PARAM_NOT_SPECIFIED /* Use default queue configured to NWAL if packet is routed to host */
+};
+
+nwal_RetValue retValue;
+NetapiNwalTransInfo_t *pTransInfo;
+nwal_TransID_t trans_id;
+
+ if ((!n) || (!p_mac)) {*err = NETAPI_ERR_BAD_INPUT; return -1;}
+ *err =0;
+
+ pTransInfo = netapip_GetFreeTransInfo((NETAPI_GLOBAL_T *) n->global, &trans_id);
+ if (!pTransInfo) { *err = NETAPI_ERR_BUSY; return -1;}
+ pTransInfo->transType = NETAPI_NWAL_HANDLE_TRANS_MAC;
+ pTransInfo->netapi_handle = h;
+
+ /* set up MacInfo */
+ memcpy(&MacInfo.macAddr,p_mac,6);
+ /* todo: vlan */
+ MacInfo.ifNum = switch_port; /* todo: check for 0/1 relative*/
+
+ if (route != NULL)
+ {
+ netcp_cfgp_build_route(route,&MacInfo.appRxPktFlowId, &MacInfo.appRxPktQueue);
+ }
+    pTransInfo->state = NETAPI_NWAL_HANDLE_STATE_OPEN_PENDING;
+ retValue = nwal_setMacIface( ((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,
+ trans_id,
+ (nwal_AppId) (NETAPI_NETCP_MATCH_GENERIC_MAC | iface_no),
+ &MacInfo,
+ &pTransInfo->handle);
+ if(retValue != nwal_OK)
+ {
+ *err = NETAPI_ERR_NWAL_ERR0;
+ printf (">netcp cfg - ERROR: nwal_setMacIface returned Error Code %d\n",
+ retValue);
+ pTransInfo->inUse = nwal_FALSE;
+ return -1;
+ }
+ //pTransInfo->inUse = nwal_FALSE;
+
+    //wait here until it's done, since the scheduler most likely isn't running yet
+    // todo: make this handled by scheduler poll later ??
+ if(trans_id != NWAL_TRANSID_SPIN_WAIT)
+ {
+ n->nwal_local.numPendingCfg++;
+ while ((volatile) n->nwal_local.numPendingCfg)
+ {
+            // if the response is there, this poll completes via the Ctl poll callback,
+            // which handles the rest (including decrementing numPendingCfg)
+ nwal_pollCtl(((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,NULL,NULL);
+ }
+ }
+ printf (">netcp cfg: MAC i/f %d added\n", iface_no);
+ netcp_cfgp_insert_mac(&netapi_get_global()->nwal_context,
+ p_mac, iface_no, state,vlan,
+ (void *) pTransInfo->handle);
+ pTransInfo->state = NETAPI_NWAL_HANDLE_STATE_IDLE;
+ pTransInfo->inUse = nwal_FALSE;
+ return (NETAPI_NETCP_MATCH_GENERIC_MAC | iface_no);
+}
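+
+/*
+ * Usage sketch (h is an assumed NETAPI_T handle; the MAC address and switch
+ * port are illustrative):
+ *
+ *   uint8_t mac[6] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
+ *   int err;
+ *   NETCP_CFG_MACIF_T macif = netcp_cfgCreateMacInterface(
+ *       h, mac,
+ *       0,        // iface_no
+ *       1,        // switch_port
+ *       NULL,     // route: use nwal defaults
+ *       0,        // vlan (future)
+ *       1,        // state: up (currently ignored)
+ *       &err);
+ *   if (err) { ... }
+ */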
+
+
+/*****************************************************************/
+/***************Delete a mac interface****************************/
+/*****************************************************************/
+void netcp_cfgDelMac(NETAPI_T h,int iface_no, int *err)
+{
+ nwal_RetValue ret;
+ NetapiNwalTransInfo_t *pTransInfo;
+ nwal_TransID_t trans_id;
+ NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;
+ void * ifHandle;
+
+    //get the nwal handle associated with this iface
+ ifHandle = netcp_cfgp_get_mac_handle(&netapi_get_global()->nwal_context, iface_no );
+ if(!ifHandle)
+ {*err = NETAPI_ERR_BAD_INPUT; return ;}
+ *err =0;
+
+ //get a transaction id
+ pTransInfo = netapip_GetFreeTransInfo((NETAPI_GLOBAL_T *) n->global, &trans_id);
+ if (!pTransInfo) { *err = NETAPI_ERR_BUSY; return ;}
+ pTransInfo->transType = NETAPI_NWAL_HANDLE_TRANS_MAC;
+ pTransInfo->netapi_handle = h;
+ //issue request
+ ret = nwal_delMacIface(
+ ((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,
+ trans_id,
+ ifHandle);
+ if(ret != nwal_OK)
+ {
+ *err = NETAPI_ERR_NWAL_ERR0;
+ printf (">netcp cfg - ERROR: nwal_delMacIface returned Error Code %d\n",
+ ret);
+ pTransInfo->inUse = nwal_FALSE;
+ return ;
+ }
+    //wait here until it's done, since the scheduler most likely isn't running yet
+    // todo: make this handled by scheduler poll later ??
+ if(trans_id != NWAL_TRANSID_SPIN_WAIT)
+ {
+ n->nwal_local.numPendingCfg++;
+ while ((volatile) n->nwal_local.numPendingCfg)
+ {
+            // if the response is there, this poll completes via the Ctl poll callback,
+            // which handles the rest (including decrementing numPendingCfg)
+ nwal_pollCtl(((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,NULL,NULL);
+ }
+ }
+ printf (">netcp cfg: MAC i/f %d deleted\n",iface_no);
+ pTransInfo->state = NETAPI_NWAL_HANDLE_STATE_IDLE;
+ pTransInfo->inUse = nwal_FALSE;
+ //zap the entry
+ netcp_cfgp_delete_mac(&netapi_get_global()->nwal_context, iface_no);
+ return ;
+}
+
+
+/*****************************************************************/
+/***************Add IP to MAC interface (internal)****************/
+/*****************************************************************/
+static NETCP_CFG_IP_T netcp_cfgAddIpInternal(
+ NETAPI_T h,
+ int iface_no,
+ nwal_IpType ipType,
+ nwalIpAddr_t * ip_addr,
+ nwalIpOpt_t * ip_qualifiers,
+ NETCP_CFG_ROUTE_HANDLE_T route, //NULL for default
+ int * err,
+ int flag) //TRUE: add IP to iface. False: add IP as part of classifier
+{
+NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;
+void * n_handle=NULL;
+nwalIpParam_t nwalIpParam= {
+ pa_IPV4, /* IP Type */
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* Dest IP */
+ { 0x0,0,0,0},/* IP Options */
+ NWAL_MATCH_ACTION_CONTINUE_NEXT_ROUTE, /* Continue parsing to next route for match */
+ NWAL_NEXT_ROUTE_FAIL_ACTION_HOST, /* For next route fail action by default is route to host */
+ CPPI_PARAM_NOT_SPECIFIED, /* Use default flow configured to NWAL if packet is routed to host */
+ QMSS_PARAM_NOT_SPECIFIED /* Use default queue configured to NWAL if packet is routed to host */
+};
+nwal_RetValue retValue;
+NetapiNwalTransInfo_t *pTransInfo;
+nwal_TransID_t trans_id;
+int ip_slot=-1;
+NETCP_CFG_IP_T ip_rule_id;
+NETCP_CFG_IP_T temp;
+
+    *err = 0;
+    //verify that iface has been configured
+ if (iface_no != NETCP_CFG_NO_INTERFACE)
+ {
+ if ((iface_no<0) || (iface_no>= TUNE_NETAPI_MAX_INTERFACES)) {*err = NETAPI_ERR_BAD_INPUT; return -1;}
+ }
+
+ if (iface_no != NETCP_CFG_NO_INTERFACE)
+ {
+ if(netapi_get_global()->nwal_context.interfaces[iface_no].in_use)
+ {
+ n_handle = netapi_get_global()->nwal_context.interfaces[iface_no].nwal_handle;
+ }
+ else
+ {
+ *err = NETAPI_ERR_BAD_INPUT;
+ return -1;
+ }
+ }
+ if (flag) //if adding IP to MAC then reserve a slot to save info
+ {
+ //find free slot for IP & reserve
+ ip_slot= netcp_cfgp_find_ip_slot(&netapi_get_global()->nwal_context,
+ iface_no);
+ if (ip_slot <0)
+ {
+ *err= NETAPI_ERR_NOMEM; //no room
+ return -1;
+ }
+ }
+
+ //get a transaction object for config action
+ pTransInfo = netapip_GetFreeTransInfo((NETAPI_GLOBAL_T *) n->global, &trans_id);
+ if (!pTransInfo) { *err = NETAPI_ERR_BUSY; return -1;}
+ pTransInfo->transType = NETAPI_NWAL_HANDLE_TRANS_IP;
+ pTransInfo->netapi_handle = h;
+
+ //build nwalIpParam
+ memcpy(&nwalIpParam.locIpAddr,ip_addr, sizeof(nwalIpAddr_t));
+ nwalIpParam.ipType=ipType;
+ if(route)
+ {
+ netcp_cfgp_build_route(route,&nwalIpParam.appRxPktFlowId, &nwalIpParam.appRxPktQueue);
+ }
+ else{} //use nwal defaults
+ if (ip_qualifiers)
+ memcpy(&nwalIpParam.ipOpt,ip_qualifiers, sizeof(nwalIpOpt_t));
+ else
+ memset(&nwalIpParam.ipOpt,0, sizeof(nwalIpOpt_t));
+
+ //build the rule id that will be returned when a packet matches
+ if (flag)
+        ip_rule_id = NETAPI_NETCP_MATCH_GENERIC_IP | iface_no | ((ip_slot&0xff)<<8);
+ else
+ ip_rule_id = (NETAPI_NETCP_MATCH_CLASS_L3 | iface_no);
+
+ //perform config action
+    pTransInfo->state = NETAPI_NWAL_HANDLE_STATE_OPEN_PENDING;
+ retValue = nwal_setIPAddr( netapi_get_global()->nwal_context.nwalInstHandle,
+ trans_id,
+ (nwal_AppId) (ip_rule_id),
+ n_handle,
+ &nwalIpParam,
+ &pTransInfo->handle);
+
+ if(retValue != nwal_OK)
+ {
+ *err = NETAPI_ERR_NWAL_ERR0;
+        printf (">netcp cfg: nwal_setIPAddr returned Error Code %d\n",
+ retValue);
+ pTransInfo->inUse = nwal_FALSE;
+ //zap the entry
+ if (flag)
+ {
+ netcp_cfgp_delete_ip(&netapi_get_global()->nwal_context,
+ iface_no,
+ ip_slot);
+ }
+ return -1;
+ }
+    //wait here until it's done, since the scheduler most likely isn't running yet
+    // todo: make this handled by scheduler poll later ??
+ if(trans_id != NWAL_TRANSID_SPIN_WAIT)
+ {
+ n->nwal_local.numPendingCfg++;
+ while ((volatile) n->nwal_local.numPendingCfg)
+ {
+            // if the response is there, this poll completes via the Ctl poll callback,
+            // which handles the rest (including decrementing numPendingCfg)
+ nwal_pollCtl(((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,NULL,NULL);
+ }
+ }
+ if (flag)
+ {
+ printf (">netcp cfg: IP added to interface %d (slot%d)\n", iface_no, ip_slot);
+ netcp_cfgp_insert_ip(&netapi_get_global()->nwal_context, ipType,
+ ip_addr, ip_qualifiers, iface_no, ip_slot,
+ pTransInfo->handle);
+ }
+ pTransInfo->state = NETAPI_NWAL_HANDLE_STATE_IDLE;
+ temp = (NETCP_CFG_IP_T) pTransInfo->handle;
+ pTransInfo->inUse = nwal_FALSE;
+ return (flag ? ip_rule_id: temp);
+}
+/*****************************************************************/
+/***************Add IP to MAC interface **************************/
+/*****************************************************************/
+NETCP_CFG_IP_T netcp_cfgAddIp(
+ NETAPI_T h,
+ int iface_no,
+ nwal_IpType ipType,
+ nwalIpAddr_t * ip_addr,
+ nwalIpOpt_t * ip_qualifiers,
+ NETCP_CFG_ROUTE_HANDLE_T route, //NULL for default
+ int * err
+ )
+{
+ return netcp_cfgAddIpInternal(
+ h, iface_no, ipType, ip_addr, ip_qualifiers, route, err,
+ 1);
+}
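+
+/*
+ * Usage sketch: attach an IPv4 address to MAC interface 0. The handle h and
+ * the address are illustrative; the ipType value reuses pa_IPV4 as in the
+ * nwalIpParam_t initializer above, and the nwalIpAddr_t octet layout is an
+ * assumption:
+ *
+ *   nwalIpAddr_t ip;
+ *   memset(&ip, 0, sizeof(ip));
+ *   ip.ipv4[0] = 10; ip.ipv4[1] = 0; ip.ipv4[2] = 0; ip.ipv4[3] = 1;
+ *   int err;
+ *   NETCP_CFG_IP_T ip_rule = netcp_cfgAddIp(h, 0, pa_IPV4, &ip,
+ *                                           NULL,    // no IP qualifiers
+ *                                           NULL,    // default route
+ *                                           &err);
+ *   if (err) { ... }
+ */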
+
+/*****************************************************************/
+/***************Delete an attached IP*****************************/
+/*****************************************************************/
+static void netcp_cfgDelIpInternal(NETAPI_T h, int iface_no, nwal_IpType ipType,
+ nwalIpAddr_t * ip_addr,
+ nwalIpOpt_t * ip_qualifiers,
+ NETCP_CFG_IP_T ip_rule_id,
+ int *err,
+ void * handle, /* if flag==0, handle must be valid */
+ int flag) /* flag==0 => delete IP rule that was part of classifier, not interface */
+{
+ nwal_RetValue ret;
+ NetapiNwalTransInfo_t *pTransInfo;
+ nwal_TransID_t trans_id;
+ NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;
+ void * ifHandle;
+ int ip_slot = (ip_rule_id>>8)&0xff;
+
+    //get the nwal handle associated with this ip
+ if (flag)
+ {
+ ifHandle = netcp_cfgp_get_ip_handle(
+ &netapi_get_global()->nwal_context, iface_no,
+ ip_slot );
+ }
+ else
+ {
+ ifHandle = handle;
+ }
+ if(!ifHandle)
+ {*err = NETAPI_ERR_BAD_INPUT; return ;}
+ *err =0;
+
+ //get a transaction id
+ pTransInfo = netapip_GetFreeTransInfo((NETAPI_GLOBAL_T *) n->global, &trans_id);
+ if (!pTransInfo) { *err = NETAPI_ERR_BUSY; return ;}
+ pTransInfo->transType = NETAPI_NWAL_HANDLE_TRANS_IP;
+ pTransInfo->netapi_handle = h;
+ //issue request
+ ret = nwal_delIPAddr(
+ ((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,
+ trans_id,
+ ifHandle);
+ if(ret != nwal_OK)
+ {
+ *err = NETAPI_ERR_NWAL_ERR0;
+        printf (">netcp cfg - ERROR: nwal_delIPAddr returned Error Code %d\n",
+ ret);
+ pTransInfo->inUse = nwal_FALSE;
+ return ;
+ }
+    //wait here until it's done, since the scheduler most likely isn't running yet
+    // todo: make this handled by scheduler poll later ??
+ if(trans_id != NWAL_TRANSID_SPIN_WAIT)
+ {
+ n->nwal_local.numPendingCfg++;
+ while ((volatile) n->nwal_local.numPendingCfg)
+ {
+            // if the response is there, this poll completes via the Ctl poll callback,
+            // which handles the rest (including decrementing numPendingCfg)
+ nwal_pollCtl(((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,NULL,NULL);
+ }
+ }
+ if (flag)
+ printf (">netcp cfg: attached IP deleted\n");
+ else
+ printf (">netcp cfg: Classifier IP rule deleted\n");
+ pTransInfo->state = NETAPI_NWAL_HANDLE_STATE_IDLE;
+ pTransInfo->inUse = nwal_FALSE;
+
+ //zap the entry
+ if (flag)
+ netcp_cfgp_delete_ip(&netapi_get_global()->nwal_context,
+ iface_no,
+ ip_slot);
+ return ;
+}
+
+/*****************************************************************/
+/***************Delete an attached IP*****************************/
+/*****************************************************************/
+void netcp_cfgDelIp(NETAPI_T h, int iface_no, nwal_IpType ipType,
+ nwalIpAddr_t * ip_addr,
+ nwalIpOpt_t * ip_qualifiers,
+ NETCP_CFG_IP_T ip_rule_id,
+ int *err)
+{
+ netcp_cfgDelIpInternal( h, iface_no, ipType,
+ ip_addr, ip_qualifiers, ip_rule_id,
+ err, NULL, 1);
+ return;
+}
+
+
+/**
+ * @def netcp_cfgAddClass
+ * @brief add a classifier rule into NETCP
+ **/
+NETCP_CFG_CLASS_T netcp_cfgAddClass(NETAPI_T h,
+ NETCP_CFG_CLASSIFIER_T *p_class,
+ NETCP_CFG_ROUTE_HANDLE_T route,
+ int action, int * err)
+{
+NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;
+void * l3_handle=NULL; //ip handle
+nwal_RetValue retValue;
+NetapiNwalTransInfo_t *pTransInfo;
+nwal_TransID_t trans_id;
+int class_slot=-1;
+int iface_no;
+int ip_slot=-1;
+NETCP_CFG_CLASS_T classHandle; //returned by us
+nwal_appProtoType_t proto;
+nwalLocConnCfg_t tempCfg={
+0, //nwal_handle: to be filled in
+{0}, // l4 ports: to be filled in
+0, //core id (NA)
+0, //action
+CPPI_PARAM_NOT_SPECIFIED, //flow id
+QMSS_PARAM_NOT_SPECIFIED, //dest queue
+};
+
+if(!p_class) { *err=NETAPI_ERR_BAD_INPUT; return -1;}
+switch(p_class->classType)
+{
+default:
+ printf(">netcp_cfg : classifier type %d not supported\n",p_class->classType);
+ break;
+case(NETCP_CFG_CLASS_TYPE_L3_L4):
+case(NETCP_CFG_CLASS_TYPE_L4):
+    //assume L4-only type (L2/L3 are defined by the iface and l3 id)
+ iface_no = p_class->u.c_l4.iface;
+ if (p_class->classType== NETCP_CFG_CLASS_TYPE_L4)
+ {
+ ip_slot = (p_class->u.c_l4.ip>>8)&0xff;
+ }
+
+ //verify that iface has been configured
+ if (iface_no != NETCP_CFG_NO_INTERFACE)
+ {
+ if(!netapi_get_global()->nwal_context.interfaces[iface_no].in_use)
+ {
+ *err = NETAPI_ERR_BAD_INPUT;
+ return -1;
+ }
+ }
+
+ if (p_class->classType== NETCP_CFG_CLASS_TYPE_L4)
+ {
+ //verify that ip has been configured and get its handle
+ l3_handle = netcp_cfgp_get_ip_handle(
+ &netapi_get_global()->nwal_context, iface_no,
+ ip_slot );
+ }
+ else
+ {
+ nwalIpParam_t tempParam={
+ pa_IPV4, /* IP Type */
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* Dest IP */
+ { 0x0,0,0,0},/* IP Options */
+ NWAL_MATCH_ACTION_CONTINUE_NEXT_ROUTE, /* Continue parsing to next route for match */
+ NWAL_NEXT_ROUTE_FAIL_ACTION_HOST, /* For next route fail action by default is route to host */
+ CPPI_PARAM_NOT_SPECIFIED, /* Use default flow configured to NWAL if packet is routed to host */
+ QMSS_PARAM_NOT_SPECIFIED /* Use default queue configured to NWAL if packet is routed to host */
+ };
+ //build nwalIpParam
+ memcpy(&tempParam.locIpAddr,p_class->u.c_l3_l4.ip_addr, sizeof(nwalIpAddr_t));
+ tempParam.ipType=p_class->u.c_l3_l4.ipType;
+        //use nwal defaults for route
+ if (p_class->u.c_l3_l4.ip_qualifiers)
+ memcpy(&tempParam.ipOpt,p_class->u.c_l3_l4.ip_qualifiers, sizeof(nwalIpOpt_t));
+ else
+ memset(&tempParam.ipOpt,0, sizeof(nwalIpOpt_t));
+
+
+ //find if we have a matching L3 handle for IP classifier; if not create it
+ retValue = nwal_getIPAddr (((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,
+ &tempParam,
+ netcp_cfgp_get_mac_handle(&netapi_get_global()->nwal_context, iface_no ),
+ &l3_handle);
+ if (retValue != nwal_TRUE)
+ {
+            int ret = 0;
+ //**NEW IP RULE
+ //need to attach this IP RULE to the MAC
+ l3_handle= (void *) netcp_cfgAddIpInternal(
+ h, iface_no,
+ p_class->u.c_l3_l4.ipType,
+ p_class->u.c_l3_l4.ip_addr,
+ p_class->u.c_l3_l4.ip_qualifiers,
+ p_class->u.c_l3_l4.p_fail_route,
+ &ret,
+ FALSE);
+            if (ret)   // netcp_cfgAddIpInternal() reported an error
+ {
+ l3_handle=NULL;
+ }
+ }
+ }
+ if(!l3_handle)
+ {*err = NETAPI_ERR_BAD_INPUT; return -1 ;}
+
+
+ //find free slot for CLASS & reserve
+ class_slot= netcp_cfgp_find_class_slot(&netapi_get_global()->nwal_context);
+ if(class_slot<0) {*err = NETAPI_ERR_NOMEM; return -1;}
+ classHandle = NETAPI_NETCP_MATCH_CLASS | (class_slot<<8) | (iface_no&0xff);
+ //build request from template
+ tempCfg.inHandle=l3_handle;
+ if (p_class->classType== NETCP_CFG_CLASS_TYPE_L4)
+ {
+ memcpy(&tempCfg.appProto,&p_class->u.c_l4.appProto,sizeof(nwalAppProto_t));
+ proto = p_class->u.c_l4.proto;
+ }
+ else
+ {
+ memcpy(&tempCfg.appProto,&p_class->u.c_l3_l4.appProto,sizeof(nwalAppProto_t));
+ proto = p_class->u.c_l3_l4.proto;
+ }
+
+ tempCfg.matchAction = (action==NETCP_CFG_ACTION_TO_SW) ? NWAL_MATCH_ACTION_HOST : NWAL_MATCH_ACTION_DISCARD;
+ if (route)
+ {
+ netcp_cfgp_build_route(route,&tempCfg.appRxPktFlowId, &tempCfg.appRxPktQueue);
+ }
+
+ //get a transaction id
+ pTransInfo = netapip_GetFreeTransInfo((NETAPI_GLOBAL_T *) n->global, &trans_id);
+ if (!pTransInfo) { *err = NETAPI_ERR_BUSY; return -1 ;}
+ pTransInfo->transType = NETAPI_NWAL_HANDLE_TRANS_IP; /* todo: fix this to TRANS_L4*/
+ pTransInfo->netapi_handle = h;
+ //issue request
+ retValue = nwal_addConn(((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,
+ trans_id,
+ (nwal_AppId) classHandle,
+ proto,
+ &tempCfg,
+ NULL,
+ &pTransInfo->handle);
+ if(retValue != nwal_OK)
+ {
+ *err = NETAPI_ERR_NWAL_ERR0;
+ printf (">netcp cfg - ERROR: nwal_addConn returned Error Code %d\n",
+ retValue);
+ pTransInfo->inUse = nwal_FALSE;
+ netcp_cfgp_delete_class(&netapi_get_global()->nwal_context, class_slot);
+ return -1;
+ }
+    //wait here until it's done, since the scheduler most likely isn't running yet
+    // todo: make this handled by scheduler poll later ??
+ if(trans_id != NWAL_TRANSID_SPIN_WAIT)
+ {
+ n->nwal_local.numPendingCfg++;
+ while ((volatile) n->nwal_local.numPendingCfg)
+ {
+            // if the response is there, this poll completes via the Ctl poll callback,
+            // which handles the rest (including decrementing numPendingCfg)
+ nwal_pollCtl(((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,NULL,NULL);
+ }
+ }
+    printf (">netcp cfg: L4 Classifier added to interface %d ip %d (slot%d)\n", iface_no, ip_slot, class_slot);
+ netcp_cfgp_insert_class(&netapi_get_global()->nwal_context,
+ class_slot,
+ p_class->classType,
+ NULL, //L2 we have
+ (p_class->classType== NETCP_CFG_CLASS_TYPE_L3_L4? l3_handle : NULL),
+ pTransInfo->handle);
+ pTransInfo->state = NETAPI_NWAL_HANDLE_STATE_IDLE;
+ return classHandle;
+} //end switch
+return -1;
+}
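+
+/*
+ * Usage sketch: add an L4 classifier on an IP rule that was previously
+ * attached with netcp_cfgAddIp(). The field names below follow the
+ * p_class->u.c_l4 accesses above; the proto/appProto contents are assumptions
+ * about the nwal protocol encoding and are illustrative only:
+ *
+ *   NETCP_CFG_CLASSIFIER_T cl;
+ *   memset(&cl, 0, sizeof(cl));
+ *   cl.classType    = NETCP_CFG_CLASS_TYPE_L4;
+ *   cl.u.c_l4.iface = 0;                 // MAC interface the IP was added to
+ *   cl.u.c_l4.ip    = ip_rule;           // value returned by netcp_cfgAddIp()
+ *   // cl.u.c_l4.proto / cl.u.c_l4.appProto select the L4 protocol and port
+ *   int err;
+ *   NETCP_CFG_CLASS_T cls = netcp_cfgAddClass(h, &cl,
+ *                                             NULL,                    // default route
+ *                                             NETCP_CFG_ACTION_TO_SW,  // deliver matches to host
+ *                                             &err);
+ */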
+
+//delete classifier
+void netcp_cfgDelClass(NETAPI_T h,
+ NETCP_CFG_CLASS_T classId,
+ int *err)
+{
+NETAPI_HANDLE_T * n = (NETAPI_HANDLE_T *) h;
+void * L4_handle; //class handle -> L4
+void * L3_handle; //class handle -> L3
+nwal_RetValue retValue;
+NetapiNwalTransInfo_t *pTransInfo;
+nwal_TransID_t trans_id;
+int class_slot=-1;
+//int iface;
+//int ip_slot;
+
+ class_slot = (classId>>8)&0xffff;
+ L4_handle=netcp_cfgp_get_l4_handle(
+ &netapi_get_global()->nwal_context,
+ class_slot );
+ if(!L4_handle) {*err = NETAPI_ERR_BAD_INPUT; return ;}
+ L3_handle = netcp_cfgp_get_l3_handle(
+ &netapi_get_global()->nwal_context,
+ class_slot );
+    /* l3 handle might be NULL, depending on type of classifier */
+
+ netcp_cfgp_delete_class(
+ &netapi_get_global()->nwal_context,
+ class_slot );
+ //get a transaction id
+ pTransInfo = netapip_GetFreeTransInfo((NETAPI_GLOBAL_T *) n->global, &trans_id);
+ if (!pTransInfo) { *err = NETAPI_ERR_BUSY; return ;}
+ pTransInfo->transType = NETAPI_NWAL_HANDLE_TRANS_IP;
+ pTransInfo->netapi_handle = h;
+ //issue request for L4
+ retValue = nwal_delConn(((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,
+ trans_id,
+ L4_handle);
+ if(retValue != nwal_OK)
+ {
+ *err = NETAPI_ERR_NWAL_ERR0;
+ printf (">netcp cfg - ERROR: nwal_delConn returned Error Code %d\n",
+ retValue);
+ pTransInfo->inUse = nwal_FALSE;
+ return ; /* todo: what about the L3? */
+ }
+    //wait here until it's done, since the scheduler most likely isn't running yet
+    // todo: make this handled by scheduler poll later ??
+ if(trans_id != NWAL_TRANSID_SPIN_WAIT)
+ {
+ n->nwal_local.numPendingCfg++;
+ while ((volatile) n->nwal_local.numPendingCfg)
+ {
+            // if the response is there, this poll completes via the Ctl poll callback,
+            // which handles the rest (including decrementing numPendingCfg)
+ nwal_pollCtl(((NETAPI_GLOBAL_T*) (n->global))->nwal_context.nwalInstHandle,NULL,NULL);
+ }
+ }
+    printf (">netcp cfg: Classifier deleted\n");
+ pTransInfo->state = NETAPI_NWAL_HANDLE_STATE_IDLE;
+ pTransInfo->inUse = nwal_FALSE;
+
+ /* delete L3 if we have to */
+ if (L3_handle)
+ {
+ netcp_cfgDelIpInternal( h, 0, 0,
+ NULL, NULL, 0,
+ err, L3_handle, 0);
+ }
+ return ;
+}
+
+
+/*--------------flow management--------*/
+// ADD A Flow
+NETCP_CFG_FLOW_HANDLE_T netcp_cfgAddFlow(NETAPI_T h,
+ int n,
+ Pktlib_HeapHandle handles[],
+ int sizes[],
+ int byte_offset,
+ int * err )
+{
+ Cppi_RxFlowCfg rxFlowCfg;
+ Uint8 isAlloc;
+ Qmss_QueueHnd rxBufQ[TUNE_NETAPI_MAX_BUF_POOLS_IN_FLOW];
+ Uint32 rxBufSize[TUNE_NETAPI_MAX_BUF_POOLS_IN_FLOW];
+ int i;
+ Cppi_FlowHnd FlowHnd;
+ int slot;
+ NETCP_CFG_FLOW_HANDLE_T retVal;
+
+ *err= 0; /* ok */
+ //get a slot to save new flow
+ slot = netcp_cfgp_find_flow_slot(&netapi_get_global()->nwal_context);
+ if (slot<0) { *err= NETAPI_ERR_NOMEM; return NULL; }
+
+ //configure flow
+ memset(&rxFlowCfg,0,sizeof(Cppi_RxFlowCfg));
+ for (i = 0; i < TUNE_NETAPI_MAX_BUF_POOLS_IN_FLOW; i++)
+ {
+ if (i >= n)
+ {
+ rxBufQ[i] = 0;
+ rxBufSize[i] = 0;
+ } else
+ {
+ rxBufQ[i] = Pktlib_getInternalHeapQueue(handles[i]);
+            //todo: verify sizes[i] <= heap buffer size
+            //todo: verify sizes are in increasing order
+ rxBufSize[i]= sizes[i];
+ }
+ if (i && (rxBufQ[i] <= 0))
+ {
+ rxBufQ[i] = rxBufQ[i-1];
+ rxBufSize[i] = 0;
+ }
+ }
+ /* Configure Rx flow */
+ rxFlowCfg.flowIdNum = CPPI_PARAM_NOT_SPECIFIED;
+    rxFlowCfg.rx_dest_qnum = 100; //DANGEROUS: TODO put a valid destination queue here
+ rxFlowCfg.rx_dest_qmgr = 0;
+ rxFlowCfg.rx_sop_offset = byte_offset;
+ rxFlowCfg.rx_ps_location = Cppi_PSLoc_PS_IN_DESC;
+ rxFlowCfg.rx_desc_type = Cppi_DescType_HOST;
+ rxFlowCfg.rx_error_handling = 0;
+
+ rxFlowCfg.rx_psinfo_present = 1;
+ rxFlowCfg.rx_einfo_present = 1;
+
+ rxFlowCfg.rx_dest_tag_lo = 0;
+ rxFlowCfg.rx_dest_tag_hi = 0;
+ rxFlowCfg.rx_src_tag_lo = 0;
+ rxFlowCfg.rx_src_tag_hi = 0;
+
+ rxFlowCfg.rx_size_thresh0_en = rxBufSize[1] ? 1 : 0;
+ rxFlowCfg.rx_size_thresh1_en = rxBufSize[2] ? 1 : 0;
+ rxFlowCfg.rx_size_thresh2_en = rxBufSize[3] ? 1 : 0;
+
+ rxFlowCfg.rx_dest_tag_lo_sel = 0;
+ rxFlowCfg.rx_dest_tag_hi_sel = 0;
+ rxFlowCfg.rx_src_tag_lo_sel = 0;
+ rxFlowCfg.rx_src_tag_hi_sel = 0;
+
+ rxFlowCfg.rx_fdq1_qnum = rxBufQ[1];
+ rxFlowCfg.rx_fdq1_qmgr = 0;
+ rxFlowCfg.rx_fdq2_qnum = rxBufQ[2];
+
+ rxFlowCfg.rx_fdq2_qmgr = 0;
+ rxFlowCfg.rx_fdq3_qnum = rxBufQ[3];
+
+ rxFlowCfg.rx_fdq3_qmgr = 0;
+
+ rxFlowCfg.rx_size_thresh0 = rxBufSize[1] ? rxBufSize[0] : 0;
+ rxFlowCfg.rx_size_thresh1 = rxBufSize[2] ? rxBufSize[1] : 0;
+ rxFlowCfg.rx_size_thresh2 = rxBufSize[3] ? rxBufSize[2] : 0;
+
+ rxFlowCfg.rx_fdq0_sz0_qnum = rxBufQ[0];
+ rxFlowCfg.rx_fdq0_sz0_qmgr = 0;
+ rxFlowCfg.rx_fdq0_sz1_qnum = rxBufQ[1];
+ rxFlowCfg.rx_fdq0_sz1_qmgr = 0;
+ rxFlowCfg.rx_fdq0_sz2_qnum = rxBufQ[2];
+ rxFlowCfg.rx_fdq0_sz2_qmgr = 0;
+ rxFlowCfg.rx_fdq0_sz3_qnum = rxBufQ[3];
+ rxFlowCfg.rx_fdq0_sz3_qmgr = 0;
+
+ {
+ //todo: replace this with a nwal call to get global cntx info
+ Cppi_CpDmaInitCfg cpdmaCfg;
+ memset(&cpdmaCfg,0,sizeof(Cppi_CpDmaInitCfg));
+ cpdmaCfg.dmaNum = Cppi_CpDma_PASS_CPDMA;
+ FlowHnd =
+ Cppi_configureRxFlow (Cppi_open (&cpdmaCfg), &rxFlowCfg, &isAlloc);
+    }
+ if (FlowHnd == NULL)
+ {
+ *err= NETAPI_ERR_NORES;
+ netcp_cfgp_delete_flow(&netapi_get_global()->nwal_context, slot);
+ return (NULL);
+ }
+
+ //update slot
+ retVal = netcp_cfgp_insert_flow(&netapi_get_global()->nwal_context, slot, (void*) FlowHnd);
+ printf(">netcp cfg: flow %d created\n", ((NETCP_CFG_FLOW_T *) retVal)->flowid);
+    return retVal;
+}
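+
+/*
+ * Usage sketch: create a two-pool receive flow. The heap handles are assumed
+ * to have been created with pktlib beforehand; the sizes are illustrative and,
+ * per the todos above, should increase monotonically and stay within the heap
+ * buffer sizes:
+ *
+ *   Pktlib_HeapHandle heaps[2] = { small_heap, large_heap };
+ *   int sizes[2] = { 256, 1500 };
+ *   int err;
+ *   NETCP_CFG_FLOW_HANDLE_T flow = netcp_cfgAddFlow(h, 2, heaps, sizes,
+ *                                                   0,      // byte_offset
+ *                                                   &err);
+ *   // the returned handle can then be referenced from a NETCP_CFG_ROUTE_T
+ *   // (see netcp_cfgp_build_route() above) and freed with netcp_cfgDelFlow()
+ */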
+
+//Delete a flow
+void netcp_cfgDelFlow(NETAPI_T h , NETCP_CFG_FLOW_HANDLE_T f , int * err)
+{
+ int slot;
+ void * handle;
+ *err=0;
+ /* find entry */
+ slot = netcp_cfgp_find_flow(&netapi_get_global()->nwal_context, ((NETCP_CFG_FLOW_T *) f) ->flowid, &handle);
+ if (slot<0) {*err = NETAPI_ERR_BAD_INPUT; return;}
+
+ Cppi_closeRxFlow( (Cppi_FlowHnd) handle);
+ netcp_cfgp_delete_flow(&netapi_get_global()->nwal_context, slot);
+ printf(">netcp cfg: flow %d deleted\n", ((NETCP_CFG_FLOW_T *) f)->flowid);
+ return;
+}
+
+
+/*************************************************************************/
+/*********************************INTERNAL*******************************/
+/************************************************************************/
+
+/***************************************************************
+ ********************NETCP CMD Reply Callback******************
+ ***************************************************************/
+void netapi_NWALCmdCallBack (nwal_AppId appHandle,
+ uint16_t trans_id,
+ nwal_RetValue ret)
+{
+ NetapiNwalTransInfo_t * p_trans;
+ NETAPI_NWAL_LOCAL_CONTEXT_T *p_local=NULL;
+
+ if(trans_id == NWAL_TRANSID_SPIN_WAIT)
+ {
+ netapi_get_global()->nwal_context.numBogusTransIds++;
+ return;
+ }
+
+ p_trans= &netapi_get_global()->nwal_context.transInfos[trans_id];
+ p_local =&((NETAPI_HANDLE_T*) (p_trans->netapi_handle))->nwal_local;
+
+ if(ret != nwal_OK)
+ {
+ printf (">netcp cfg : NWALCmdCallBack returned Error Code %d\n",
+ ret);
+ //todo: atomic inc
+ netapi_get_global()->nwal_context.numCmdFail++;
+ }
+ else
+ {
+ //todo: atomic inc
+ netapi_get_global()->nwal_context.numCmdPass++;
+ switch(p_trans->transType)
+ {
+ case NETAPI_NWAL_HANDLE_TRANS_MAC:
+ {
+ if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_OPEN_PENDING)
+ {
+ p_trans->state =NETAPI_NWAL_HANDLE_STATE_OPEN;
+ }
+ else if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_CLOSE_PENDING)
+ {
+ p_trans->state =NETAPI_NWAL_HANDLE_STATE_IDLE;
+ }
+ break;
+ }
+ case NETAPI_NWAL_HANDLE_TRANS_IP:
+ {
+ if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_OPEN_PENDING)
+ {
+ p_trans->state =NETAPI_NWAL_HANDLE_STATE_OPEN;
+ }
+ else if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_CLOSE_PENDING)
+ {
+ p_trans->state =NETAPI_NWAL_HANDLE_STATE_IDLE;
+ }
+ break;
+ }
+ case NETAPI_NWAL_HANDLE_TRANS_PORT:
+ {
+ if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_OPEN_PENDING)
+ {
+ p_trans->state =NETAPI_NWAL_HANDLE_STATE_OPEN;
+ }
+ else if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_CLOSE_PENDING)
+ {
+ p_trans->state =NETAPI_NWAL_HANDLE_STATE_IDLE;
+ }
+ break;
+ }
+ case NETAPI_NWAL_HANDLE_TRANS_SA:
+ {
+ if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_OPEN_PENDING)
+ {
+ p_trans->state =NETAPI_NWAL_HANDLE_STATE_OPEN;
+ }
+ else if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_CLOSE_PENDING)
+ {
+ p_trans->state =NETAPI_NWAL_HANDLE_STATE_IDLE;
+ }
+ break;
+ }
+ case NETAPI_NWAL_HANDLE_TRANS_SA_POLICY:
+ {
+ if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_OPEN_PENDING)
+ {
+ p_trans->state =NETAPI_NWAL_HANDLE_STATE_OPEN;
+ }
+ else if(p_trans->state == NETAPI_NWAL_HANDLE_STATE_CLOSE_PENDING)
+